Diffstat (limited to 'drivers/spi')
-rw-r--r--	drivers/spi/spi-stmp.c        |  679
-rw-r--r--	drivers/spi/spi-tegra.c       | 1666
-rw-r--r--	drivers/spi/spi_slave_tegra.c | 1401
3 files changed, 3746 insertions(+), 0 deletions(-)
diff --git a/drivers/spi/spi-stmp.c b/drivers/spi/spi-stmp.c
new file mode 100644
index 00000000000..fadff76eb7e
--- /dev/null
+++ b/drivers/spi/spi-stmp.c
@@ -0,0 +1,679 @@
1/*
2 * Freescale STMP378X SPI master driver
3 *
4 * Author: dmitry pervushin <dimka@embeddedalley.com>
5 *
6 * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
7 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
8 */
9
10/*
11 * The code contained herein is licensed under the GNU General Public
12 * License. You may obtain a copy of the GNU General Public License
13 * Version 2 or later at the following locations:
14 *
15 * http://www.opensource.org/licenses/gpl-license.html
16 * http://www.gnu.org/copyleft/gpl.html
17 */
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/spi/spi.h>
23#include <linux/err.h>
24#include <linux/clk.h>
25#include <linux/io.h>
26#include <linux/dma-mapping.h>
27#include <linux/delay.h>
28
29#include <mach/platform.h>
30#include <mach/stmp3xxx.h>
31#include <mach/dma.h>
32#include <mach/regs-ssp.h>
33#include <mach/regs-apbh.h>
34
35
36/* 0 means DMA mode (recommended, default); nonzero selects PIO mode */
37static int pio;
38static int clock;
39
40/* default timeout for busy waits is 2 seconds */
41#define STMP_SPI_TIMEOUT (2 * HZ)
42
43struct stmp_spi {
44 int id;
45
46	void __iomem *regs;	/* vaddr of the control registers */
47
48 int irq, err_irq;
49 u32 dma;
50 struct stmp3xxx_dma_descriptor d;
51
52 u32 speed_khz;
53 u32 saved_timings;
54 u32 divider;
55
56 struct clk *clk;
57 struct device *master_dev;
58
59 struct work_struct work;
60 struct workqueue_struct *workqueue;
61
62 /* lock protects queue access */
63 spinlock_t lock;
64 struct list_head queue;
65
66 struct completion done;
67};
68
69#define busy_wait(cond) \
70 ({ \
71 unsigned long end_jiffies = jiffies + STMP_SPI_TIMEOUT; \
72 bool succeeded = false; \
73 do { \
74 if (cond) { \
75 succeeded = true; \
76 break; \
77 } \
78 cpu_relax(); \
79 } while (time_before(jiffies, end_jiffies)); \
80 succeeded; \
81 })
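/*
 * Usage sketch (illustrative, mirroring the calls later in this file):
 * the macro evaluates to true only if @cond became true within
 * STMP_SPI_TIMEOUT, so a timeout is detected as:
 *
 *	if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN))
 *		status = -ETIMEDOUT;
 */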
82
83/**
84 * stmp_spi_init_hw - initialize the SSP port
85 * @ss: driver state
86 */
87static int stmp_spi_init_hw(struct stmp_spi *ss)
88{
89 int err = 0;
90 void *pins = ss->master_dev->platform_data;
91
92 err = stmp3xxx_request_pin_group(pins, dev_name(ss->master_dev));
93 if (err)
94 goto out;
95
96 ss->clk = clk_get(NULL, "ssp");
97 if (IS_ERR(ss->clk)) {
98 err = PTR_ERR(ss->clk);
99 goto out_free_pins;
100 }
101 clk_enable(ss->clk);
102
103 stmp3xxx_reset_block(ss->regs, false);
104 stmp3xxx_dma_reset_channel(ss->dma);
105
106 return 0;
107
108out_free_pins:
109 stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
110out:
111 return err;
112}
113
114static void stmp_spi_release_hw(struct stmp_spi *ss)
115{
116 void *pins = ss->master_dev->platform_data;
117
118 if (ss->clk && !IS_ERR(ss->clk)) {
119 clk_disable(ss->clk);
120 clk_put(ss->clk);
121 }
122 stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
123}
124
125static int stmp_spi_setup_transfer(struct spi_device *spi,
126 struct spi_transfer *t)
127{
128 u8 bits_per_word;
129 u32 hz;
130 struct stmp_spi *ss = spi_master_get_devdata(spi->master);
131 u16 rate;
132
133 bits_per_word = spi->bits_per_word;
134 if (t && t->bits_per_word)
135 bits_per_word = t->bits_per_word;
136
137 /*
138 * Calculate speed:
139 * - by default, use maximum speed from ssp clk
140 * - if device overrides it, use it
141 * - if transfer specifies other speed, use transfer's one
142 */
143 hz = 1000 * ss->speed_khz / ss->divider;
144 if (spi->max_speed_hz)
145 hz = min(hz, spi->max_speed_hz);
146 if (t && t->speed_hz)
147 hz = min(hz, t->speed_hz);
148
149 if (hz == 0) {
150 dev_err(&spi->dev, "Cannot continue with zero clock\n");
151 return -EINVAL;
152 }
153
154 if (bits_per_word != 8) {
155 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
156 __func__, bits_per_word);
157 return -EINVAL;
158 }
159
160 dev_dbg(&spi->dev, "Requested clk rate = %uHz, max = %uHz/%d = %uHz\n",
161 hz, ss->speed_khz, ss->divider,
162 ss->speed_khz * 1000 / ss->divider);
163
164 if (ss->speed_khz * 1000 / ss->divider < hz) {
165 dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
166 __func__, hz);
167 return -EINVAL;
168 }
169
170	rate = 1000 * ss->speed_khz / ss->divider / hz;
171
172 writel(BF(ss->divider, SSP_TIMING_CLOCK_DIVIDE) |
173 BF(rate - 1, SSP_TIMING_CLOCK_RATE),
174 HW_SSP_TIMING + ss->regs);
175
176 writel(BF(1 /* mode SPI */, SSP_CTRL1_SSP_MODE) |
177 BF(4 /* 8 bits */, SSP_CTRL1_WORD_LENGTH) |
178 ((spi->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
179 ((spi->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0) |
180 (pio ? 0 : BM_SSP_CTRL1_DMA_ENABLE),
181 ss->regs + HW_SSP_CTRL1);
182
183 return 0;
184}
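/*
 * Worked example for the timing math above (illustrative numbers):
 * with ss->speed_khz = 48000 and ss->divider = 2, a transfer asking for
 * hz = 1000000 yields rate = 1000 * 48000 / 2 / 1000000 = 24, so
 * HW_SSP_TIMING gets CLOCK_DIVIDE = 2 and CLOCK_RATE = 23, i.e.
 * SCK = 48 MHz / (2 * 24) = 1 MHz.
 */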
185
186static int stmp_spi_setup(struct spi_device *spi)
187{
188 /* spi_setup() does basic checks,
189 * stmp_spi_setup_transfer() does more later
190 */
191 if (spi->bits_per_word != 8) {
192 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
193 __func__, spi->bits_per_word);
194 return -EINVAL;
195 }
196 return 0;
197}
198
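/*
 * Map the low two bits of the chip-select number onto CTRL0 bits:
 * cs = 0 sets neither bit, cs = 1 sets WAIT_FOR_CMD, cs = 2 sets
 * WAIT_FOR_IRQ, cs = 3 sets both.
 */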
199static inline u32 stmp_spi_cs(unsigned cs)
200{
201 return ((cs & 1) ? BM_SSP_CTRL0_WAIT_FOR_CMD : 0) |
202 ((cs & 2) ? BM_SSP_CTRL0_WAIT_FOR_IRQ : 0);
203}
204
205static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs,
206 unsigned char *buf, dma_addr_t dma_buf, int len,
207 int first, int last, bool write)
208{
209 u32 c0 = 0;
210 dma_addr_t spi_buf_dma = dma_buf;
211 int status = 0;
212 enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
213
214 c0 |= (first ? BM_SSP_CTRL0_LOCK_CS : 0);
215 c0 |= (last ? BM_SSP_CTRL0_IGNORE_CRC : 0);
216 c0 |= (write ? 0 : BM_SSP_CTRL0_READ);
217 c0 |= BM_SSP_CTRL0_DATA_XFER;
218
219 c0 |= stmp_spi_cs(cs);
220
221 c0 |= BF(len, SSP_CTRL0_XFER_COUNT);
222
223 if (!dma_buf)
224 spi_buf_dma = dma_map_single(ss->master_dev, buf, len, dir);
225
226 ss->d.command->cmd =
227 BF(len, APBH_CHn_CMD_XFER_COUNT) |
228 BF(1, APBH_CHn_CMD_CMDWORDS) |
229 BM_APBH_CHn_CMD_WAIT4ENDCMD |
230 BM_APBH_CHn_CMD_IRQONCMPLT |
231 BF(write ? BV_APBH_CHn_CMD_COMMAND__DMA_READ :
232 BV_APBH_CHn_CMD_COMMAND__DMA_WRITE,
233 APBH_CHn_CMD_COMMAND);
234 ss->d.command->pio_words[0] = c0;
235 ss->d.command->buf_ptr = spi_buf_dma;
236
237 stmp3xxx_dma_reset_channel(ss->dma);
238 stmp3xxx_dma_clear_interrupt(ss->dma);
239 stmp3xxx_dma_enable_interrupt(ss->dma);
240 init_completion(&ss->done);
241 stmp3xxx_dma_go(ss->dma, &ss->d, 1);
242 wait_for_completion(&ss->done);
243
244 if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN))
245 status = -ETIMEDOUT;
246
247 if (!dma_buf)
248 dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir);
249
250 return status;
251}
252
253static inline void stmp_spi_enable(struct stmp_spi *ss)
254{
255 stmp3xxx_setl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
256 stmp3xxx_clearl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
257}
258
259static inline void stmp_spi_disable(struct stmp_spi *ss)
260{
261 stmp3xxx_clearl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
262 stmp3xxx_setl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
263}
264
265static int stmp_spi_txrx_pio(struct stmp_spi *ss, int cs,
266 unsigned char *buf, int len,
267 bool first, bool last, bool write)
268{
269 if (first)
270 stmp_spi_enable(ss);
271
272 stmp3xxx_setl(stmp_spi_cs(cs), ss->regs + HW_SSP_CTRL0);
273
274 while (len--) {
275 if (last && len <= 0)
276 stmp_spi_disable(ss);
277
278 stmp3xxx_clearl(BM_SSP_CTRL0_XFER_COUNT,
279 ss->regs + HW_SSP_CTRL0);
280 stmp3xxx_setl(1, ss->regs + HW_SSP_CTRL0);
281
282 if (write)
283 stmp3xxx_clearl(BM_SSP_CTRL0_READ,
284 ss->regs + HW_SSP_CTRL0);
285 else
286 stmp3xxx_setl(BM_SSP_CTRL0_READ,
287 ss->regs + HW_SSP_CTRL0);
288
289 /* Run! */
290 stmp3xxx_setl(BM_SSP_CTRL0_RUN, ss->regs + HW_SSP_CTRL0);
291
292 if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
293 BM_SSP_CTRL0_RUN))
294 break;
295
296 if (write)
297 writel(*buf, ss->regs + HW_SSP_DATA);
298
299 /* Set TRANSFER */
300 stmp3xxx_setl(BM_SSP_CTRL0_DATA_XFER, ss->regs + HW_SSP_CTRL0);
301
302 if (!write) {
303 if (busy_wait((readl(ss->regs + HW_SSP_STATUS) &
304 BM_SSP_STATUS_FIFO_EMPTY)))
305 break;
306 *buf = readl(ss->regs + HW_SSP_DATA) & 0xFF;
307 }
308
309 if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
310 BM_SSP_CTRL0_RUN))
311 break;
312
313 /* advance to the next byte */
314 buf++;
315 }
316
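	/*
	 * A fully completed loop leaves len == -1 via the post-decrement
	 * in the while condition; len >= 0 here means one of the
	 * busy_wait() calls above timed out.
	 */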
317 return len < 0 ? 0 : -ETIMEDOUT;
318}
319
320static int stmp_spi_handle_message(struct stmp_spi *ss, struct spi_message *m)
321{
322 bool first, last;
323 struct spi_transfer *t, *tmp_t;
324 int status = 0;
325 int cs;
326
327 cs = m->spi->chip_select;
328
329 list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
330
331 first = (&t->transfer_list == m->transfers.next);
332 last = (&t->transfer_list == m->transfers.prev);
333
334 if (first || t->speed_hz || t->bits_per_word)
335 stmp_spi_setup_transfer(m->spi, t);
336
337 /* reject "not last" transfers which request to change cs */
338 if (t->cs_change && !last) {
339 dev_err(&m->spi->dev,
340 "Message with t->cs_change has been skipped\n");
341 continue;
342 }
343
344 if (t->tx_buf) {
345 status = pio ?
346 stmp_spi_txrx_pio(ss, cs, (void *)t->tx_buf,
347 t->len, first, last, true) :
348 stmp_spi_txrx_dma(ss, cs, (void *)t->tx_buf,
349 t->tx_dma, t->len, first, last, true);
350#ifdef DEBUG
351 if (t->len < 0x10)
352 print_hex_dump_bytes("Tx ",
353 DUMP_PREFIX_OFFSET,
354 t->tx_buf, t->len);
355 else
356 pr_debug("Tx: %d bytes\n", t->len);
357#endif
358 }
359 if (t->rx_buf) {
360 status = pio ?
361 stmp_spi_txrx_pio(ss, cs, t->rx_buf,
362 t->len, first, last, false) :
363 stmp_spi_txrx_dma(ss, cs, t->rx_buf,
364 t->rx_dma, t->len, first, last, false);
365#ifdef DEBUG
366 if (t->len < 0x10)
367 print_hex_dump_bytes("Rx ",
368 DUMP_PREFIX_OFFSET,
369 t->rx_buf, t->len);
370 else
371 pr_debug("Rx: %d bytes\n", t->len);
372#endif
373 }
374
375 if (t->delay_usecs)
376 udelay(t->delay_usecs);
377
378 if (status)
379 break;
380
381 }
382 return status;
383}
384
385/**
386 * stmp_spi_handle - handle messages from the queue
387 */
388static void stmp_spi_handle(struct work_struct *w)
389{
390 struct stmp_spi *ss = container_of(w, struct stmp_spi, work);
391 unsigned long flags;
392 struct spi_message *m;
393
394 spin_lock_irqsave(&ss->lock, flags);
395 while (!list_empty(&ss->queue)) {
396 m = list_entry(ss->queue.next, struct spi_message, queue);
397 list_del_init(&m->queue);
398 spin_unlock_irqrestore(&ss->lock, flags);
399
400 m->status = stmp_spi_handle_message(ss, m);
401 m->complete(m->context);
402
403 spin_lock_irqsave(&ss->lock, flags);
404 }
405 spin_unlock_irqrestore(&ss->lock, flags);
406
407 return;
408}
409
410/**
411 * stmp_spi_transfer - perform message transfer.
412 * Called indirectly from spi_async, queues all the messages to
413 * spi_handle_message.
414 * @spi: spi device
415 * @m: message to be queued
416 */
417static int stmp_spi_transfer(struct spi_device *spi, struct spi_message *m)
418{
419 struct stmp_spi *ss = spi_master_get_devdata(spi->master);
420 unsigned long flags;
421
422 m->status = -EINPROGRESS;
423 spin_lock_irqsave(&ss->lock, flags);
424 list_add_tail(&m->queue, &ss->queue);
425 queue_work(ss->workqueue, &ss->work);
426 spin_unlock_irqrestore(&ss->lock, flags);
427 return 0;
428}
429
430static irqreturn_t stmp_spi_irq(int irq, void *dev_id)
431{
432 struct stmp_spi *ss = dev_id;
433
434 stmp3xxx_dma_clear_interrupt(ss->dma);
435 complete(&ss->done);
436 return IRQ_HANDLED;
437}
438
439static irqreturn_t stmp_spi_irq_err(int irq, void *dev_id)
440{
441 struct stmp_spi *ss = dev_id;
442 u32 c1, st;
443
444 c1 = readl(ss->regs + HW_SSP_CTRL1);
445 st = readl(ss->regs + HW_SSP_STATUS);
446 dev_err(ss->master_dev, "%s: status = 0x%08X, c1 = 0x%08X\n",
447 __func__, st, c1);
448 stmp3xxx_clearl(c1 & 0xCCCC0000, ss->regs + HW_SSP_CTRL1);
449
450 return IRQ_HANDLED;
451}
452
453static int __devinit stmp_spi_probe(struct platform_device *dev)
454{
455 int err = 0;
456 struct spi_master *master;
457 struct stmp_spi *ss;
458 struct resource *r;
459
460 master = spi_alloc_master(&dev->dev, sizeof(struct stmp_spi));
461 if (master == NULL) {
462 err = -ENOMEM;
463 goto out0;
464 }
465 master->flags = SPI_MASTER_HALF_DUPLEX;
466
467 ss = spi_master_get_devdata(master);
468 platform_set_drvdata(dev, master);
469
470	/* Get resources (memory, IRQ) associated with the device */
471 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
472 if (r == NULL) {
473 err = -ENODEV;
474 goto out_put_master;
475 }
476 ss->regs = ioremap(r->start, resource_size(r));
477 if (!ss->regs) {
478 err = -EINVAL;
479 goto out_put_master;
480 }
481
482 ss->master_dev = &dev->dev;
483 ss->id = dev->id;
484
485 INIT_WORK(&ss->work, stmp_spi_handle);
486 INIT_LIST_HEAD(&ss->queue);
487 spin_lock_init(&ss->lock);
488
489 ss->workqueue = create_singlethread_workqueue(dev_name(&dev->dev));
490 if (!ss->workqueue) {
491 err = -ENXIO;
492 goto out_put_master;
493 }
494 master->transfer = stmp_spi_transfer;
495 master->setup = stmp_spi_setup;
496
497 /* the spi->mode bits understood by this driver: */
498 master->mode_bits = SPI_CPOL | SPI_CPHA;
499
500 ss->irq = platform_get_irq(dev, 0);
501 if (ss->irq < 0) {
502 err = ss->irq;
503 goto out_put_master;
504 }
505 ss->err_irq = platform_get_irq(dev, 1);
506 if (ss->err_irq < 0) {
507 err = ss->err_irq;
508 goto out_put_master;
509 }
510
511 r = platform_get_resource(dev, IORESOURCE_DMA, 0);
512 if (r == NULL) {
513 err = -ENODEV;
514 goto out_put_master;
515 }
516
517 ss->dma = r->start;
518 err = stmp3xxx_dma_request(ss->dma, &dev->dev, dev_name(&dev->dev));
519 if (err)
520 goto out_put_master;
521
522 err = stmp3xxx_dma_allocate_command(ss->dma, &ss->d);
523 if (err)
524 goto out_free_dma;
525
526 master->bus_num = dev->id;
527 master->num_chipselect = 1;
528
529	/* SPI controller initialization */
530 err = stmp_spi_init_hw(ss);
531 if (err) {
532 dev_dbg(&dev->dev, "cannot initialize hardware\n");
533 goto out_free_dma_desc;
534 }
535
536 if (clock) {
537 dev_info(&dev->dev, "clock rate forced to %d\n", clock);
538 clk_set_rate(ss->clk, clock);
539 }
540 ss->speed_khz = clk_get_rate(ss->clk);
541 ss->divider = 2;
542 dev_info(&dev->dev, "max possible speed %d = %ld/%d kHz\n",
543 ss->speed_khz, clk_get_rate(ss->clk), ss->divider);
544
545 /* Register for SPI interrupt */
546 err = request_irq(ss->irq, stmp_spi_irq, 0,
547 dev_name(&dev->dev), ss);
548 if (err) {
549 dev_dbg(&dev->dev, "request_irq failed, %d\n", err);
550 goto out_release_hw;
551 }
552
553	/* ...and the shared error interrupt for all SSP controllers */
554 err = request_irq(ss->err_irq, stmp_spi_irq_err, IRQF_SHARED,
555 dev_name(&dev->dev), ss);
556 if (err) {
557 dev_dbg(&dev->dev, "request_irq(error) failed, %d\n", err);
558 goto out_free_irq;
559 }
560
561 err = spi_register_master(master);
562 if (err) {
563 dev_dbg(&dev->dev, "cannot register spi master, %d\n", err);
564 goto out_free_irq_2;
565 }
566 dev_info(&dev->dev, "at (mapped) 0x%08X, irq=%d, bus %d, %s mode\n",
567 (u32)ss->regs, ss->irq, master->bus_num,
568 pio ? "PIO" : "DMA");
569 return 0;
570
571out_free_irq_2:
572 free_irq(ss->err_irq, ss);
573out_free_irq:
574 free_irq(ss->irq, ss);
575out_free_dma_desc:
576 stmp3xxx_dma_free_command(ss->dma, &ss->d);
577out_free_dma:
578 stmp3xxx_dma_release(ss->dma);
579out_release_hw:
580 stmp_spi_release_hw(ss);
581out_put_master:
582 if (ss->workqueue)
583 destroy_workqueue(ss->workqueue);
584 if (ss->regs)
585 iounmap(ss->regs);
586 platform_set_drvdata(dev, NULL);
587 spi_master_put(master);
588out0:
589 return err;
590}
591
592static int __devexit stmp_spi_remove(struct platform_device *dev)
593{
594 struct stmp_spi *ss;
595 struct spi_master *master;
596
597 master = platform_get_drvdata(dev);
598 if (master == NULL)
599 goto out0;
600 ss = spi_master_get_devdata(master);
601
602 spi_unregister_master(master);
603
604 free_irq(ss->err_irq, ss);
605 free_irq(ss->irq, ss);
606 stmp3xxx_dma_free_command(ss->dma, &ss->d);
607 stmp3xxx_dma_release(ss->dma);
608 stmp_spi_release_hw(ss);
609 destroy_workqueue(ss->workqueue);
610 iounmap(ss->regs);
611 spi_master_put(master);
612 platform_set_drvdata(dev, NULL);
613out0:
614 return 0;
615}
616
617#ifdef CONFIG_PM
618static int stmp_spi_suspend(struct platform_device *pdev, pm_message_t pmsg)
619{
620 struct stmp_spi *ss;
621 struct spi_master *master;
622
623 master = platform_get_drvdata(pdev);
624 ss = spi_master_get_devdata(master);
625
626 ss->saved_timings = readl(HW_SSP_TIMING + ss->regs);
627 clk_disable(ss->clk);
628
629 return 0;
630}
631
632static int stmp_spi_resume(struct platform_device *pdev)
633{
634 struct stmp_spi *ss;
635 struct spi_master *master;
636
637 master = platform_get_drvdata(pdev);
638 ss = spi_master_get_devdata(master);
639
640 clk_enable(ss->clk);
641 stmp3xxx_reset_block(ss->regs, false);
642 writel(ss->saved_timings, ss->regs + HW_SSP_TIMING);
643
644 return 0;
645}
646
647#else
648#define stmp_spi_suspend NULL
649#define stmp_spi_resume NULL
650#endif
651
652static struct platform_driver stmp_spi_driver = {
653 .probe = stmp_spi_probe,
654 .remove = __devexit_p(stmp_spi_remove),
655 .driver = {
656 .name = "stmp3xxx_ssp",
657 .owner = THIS_MODULE,
658 },
659 .suspend = stmp_spi_suspend,
660 .resume = stmp_spi_resume,
661};
662
663static int __init stmp_spi_init(void)
664{
665 return platform_driver_register(&stmp_spi_driver);
666}
667
668static void __exit stmp_spi_exit(void)
669{
670 platform_driver_unregister(&stmp_spi_driver);
671}
672
673module_init(stmp_spi_init);
674module_exit(stmp_spi_exit);
675module_param(pio, int, S_IRUGO);
676module_param(clock, int, S_IRUGO);
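/*
 * Illustrative load-time usage (module name assumed from the file name;
 * both parameters are read-only after loading):
 *
 *	modprobe spi-stmp pio=1 clock=<rate>
 */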
677MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com>");
678MODULE_DESCRIPTION("STMP3xxx SPI/SSP driver");
679MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c
new file mode 100644
index 00000000000..6810f611c55
--- /dev/null
+++ b/drivers/spi/spi-tegra.c
@@ -0,0 +1,1666 @@
1/*
2 * Driver for Nvidia TEGRA spi controller.
3 *
4 * Copyright (C) 2010 Google, Inc.
5 *
6 * Author:
7 * Erik Gilling <konkers@android.com>
8 *
9 * Copyright (C) 2010-2011 NVIDIA Corporation
10 *
11 * This software is licensed under the terms of the GNU General Public
12 * License version 2, as published by the Free Software Foundation, and
13 * may be copied, distributed, and modified under those terms.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 */
21
22/*#define DEBUG 1*/
23/*#define VERBOSE_DEBUG 1*/
24
25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/err.h>
28#include <linux/platform_device.h>
29#include <linux/io.h>
30#include <linux/dma-mapping.h>
31#include <linux/dmapool.h>
32#include <linux/clk.h>
33#include <linux/interrupt.h>
34#include <linux/delay.h>
35#include <linux/completion.h>
36#include <linux/kthread.h>
37#include <linux/pm_runtime.h>
38
39#include <linux/spi/spi.h>
40#include <linux/spi-tegra.h>
41
42#include <mach/dma.h>
43#include <mach/clk.h>
44
45#define SLINK_COMMAND 0x000
46#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
47#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
48#define SLINK_BOTH_EN (1 << 10)
49#define SLINK_CS_SW (1 << 11)
50#define SLINK_CS_VALUE (1 << 12)
51#define SLINK_CS_POLARITY (1 << 13)
52#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
53#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
54#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
55#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
56#define SLINK_IDLE_SDA_MASK (3 << 16)
57#define SLINK_CS_POLARITY1 (1 << 20)
58#define SLINK_CK_SDA (1 << 21)
59#define SLINK_CS_POLARITY2 (1 << 22)
60#define SLINK_CS_POLARITY3 (1 << 23)
61#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
62#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
63#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
64#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
65#define SLINK_IDLE_SCLK_MASK (3 << 24)
66#define SLINK_M_S (1 << 28)
67#define SLINK_WAIT (1 << 29)
68#define SLINK_GO (1 << 30)
69#define SLINK_ENB (1 << 31)
70
71#define SLINK_COMMAND2 0x004
72#define SLINK_LSBFE (1 << 0)
73#define SLINK_SSOE (1 << 1)
74#define SLINK_SPIE (1 << 4)
75#define SLINK_BIDIROE (1 << 6)
76#define SLINK_MODFEN (1 << 7)
77#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
78#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
79#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
80#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
81#define SLINK_FIFO_REFILLS_0 (0 << 22)
82#define SLINK_FIFO_REFILLS_1 (1 << 22)
83#define SLINK_FIFO_REFILLS_2 (2 << 22)
84#define SLINK_FIFO_REFILLS_3 (3 << 22)
85#define SLINK_FIFO_REFILLS_MASK (3 << 22)
86#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
87#define SLINK_SPC0 (1 << 29)
88#define SLINK_TXEN (1 << 30)
89#define SLINK_RXEN (1 << 31)
90
91#define SLINK_STATUS 0x008
92#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
93#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
94#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
95#define SLINK_MODF (1 << 16)
96#define SLINK_RX_UNF (1 << 18)
97#define SLINK_TX_OVF (1 << 19)
98#define SLINK_TX_FULL (1 << 20)
99#define SLINK_TX_EMPTY (1 << 21)
100#define SLINK_RX_FULL (1 << 22)
101#define SLINK_RX_EMPTY (1 << 23)
102#define SLINK_TX_UNF (1 << 24)
103#define SLINK_RX_OVF (1 << 25)
104#define SLINK_TX_FLUSH (1 << 26)
105#define SLINK_RX_FLUSH (1 << 27)
106#define SLINK_SCLK (1 << 28)
107#define SLINK_ERR (1 << 29)
108#define SLINK_RDY (1 << 30)
109#define SLINK_BSY (1 << 31)
110
111#define SLINK_MAS_DATA 0x010
112#define SLINK_SLAVE_DATA 0x014
113
114#define SLINK_DMA_CTL 0x018
115#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
116#define SLINK_TX_TRIG_1 (0 << 16)
117#define SLINK_TX_TRIG_4 (1 << 16)
118#define SLINK_TX_TRIG_8 (2 << 16)
119#define SLINK_TX_TRIG_16 (3 << 16)
120#define SLINK_TX_TRIG_MASK (3 << 16)
121#define SLINK_RX_TRIG_1 (0 << 18)
122#define SLINK_RX_TRIG_4 (1 << 18)
123#define SLINK_RX_TRIG_8 (2 << 18)
124#define SLINK_RX_TRIG_16 (3 << 18)
125#define SLINK_RX_TRIG_MASK (3 << 18)
126#define SLINK_PACKED (1 << 20)
127#define SLINK_PACK_SIZE_4 (0 << 21)
128#define SLINK_PACK_SIZE_8 (1 << 21)
129#define SLINK_PACK_SIZE_16 (2 << 21)
130#define SLINK_PACK_SIZE_32 (3 << 21)
131#define SLINK_PACK_SIZE_MASK (3 << 21)
132#define SLINK_IE_TXC (1 << 26)
133#define SLINK_IE_RXC (1 << 27)
134#define SLINK_DMA_EN (1 << 31)
135
136#define SLINK_STATUS2 0x01c
137#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
138#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
139#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
140
141#define SLINK_TX_FIFO 0x100
142#define SLINK_RX_FIFO 0x180
143
144#define DATA_DIR_TX (1 << 0)
145#define DATA_DIR_RX (1 << 1)
146
147#define SPI_FIFO_DEPTH 32
148#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
149
150
151static const unsigned long spi_tegra_req_sels[] = {
152 TEGRA_DMA_REQ_SEL_SL2B1,
153 TEGRA_DMA_REQ_SEL_SL2B2,
154 TEGRA_DMA_REQ_SEL_SL2B3,
155 TEGRA_DMA_REQ_SEL_SL2B4,
156#ifndef CONFIG_ARCH_TEGRA_2x_SOC
157 TEGRA_DMA_REQ_SEL_SL2B5,
158 TEGRA_DMA_REQ_SEL_SL2B6,
159#endif
160
161};
162
163#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
164#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
165#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
166
167#define SLINK_STATUS2_RESET \
168 (TX_FIFO_EMPTY_COUNT_MAX | \
169 RX_FIFO_FULL_COUNT_ZERO << 16)
170
171#define MAX_CHIP_SELECT 4
172#define SLINK_FIFO_DEPTH 4
173
174struct spi_tegra_data {
175 struct spi_master *master;
176 struct platform_device *pdev;
177 spinlock_t lock;
178 char port_name[32];
179
180 struct clk *clk;
181 struct clk *sclk;
182 void __iomem *base;
183 phys_addr_t phys;
184 unsigned irq;
185
186 u32 cur_speed;
187
188 struct list_head queue;
189 struct spi_transfer *cur;
190 struct spi_device *cur_spi;
191 unsigned cur_pos;
192 unsigned cur_len;
193 unsigned words_per_32bit;
194 unsigned bytes_per_word;
195 unsigned curr_dma_words;
196
197 unsigned cur_direction;
198
199 bool is_dma_allowed;
200
201 struct tegra_dma_req rx_dma_req;
202 struct tegra_dma_channel *rx_dma;
203 u32 *rx_buf;
204 dma_addr_t rx_buf_phys;
205 unsigned cur_rx_pos;
206
207 struct tegra_dma_req tx_dma_req;
208 struct tegra_dma_channel *tx_dma;
209 u32 *tx_buf;
210 dma_addr_t tx_buf_phys;
211 unsigned cur_tx_pos;
212
213 unsigned dma_buf_size;
214 unsigned max_buf_size;
215 bool is_curr_dma_xfer;
216
217 bool is_clkon_always;
218 bool clk_state;
219 bool is_suspended;
220
221 bool is_hw_based_cs;
222
223 struct completion rx_dma_complete;
224 struct completion tx_dma_complete;
225 bool is_transfer_in_progress;
226
227 u32 rx_complete;
228 u32 tx_complete;
229 u32 tx_status;
230 u32 rx_status;
231 u32 status_reg;
232 bool is_packed;
233 unsigned long packed_size;
234
235 u32 command_reg;
236 u32 command2_reg;
237 u32 dma_control_reg;
238 u32 def_command_reg;
239 u32 def_command2_reg;
240
241 struct spi_clk_parent *parent_clk_list;
242 int parent_clk_count;
243 unsigned long max_rate;
244 unsigned long max_parent_rate;
245 int min_div;
246 struct workqueue_struct *spi_workqueue;
247 struct work_struct spi_transfer_work;
248};
249
250static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
251 unsigned long reg)
252{
253 if (!tspi->clk_state)
254 BUG();
255 return readl(tspi->base + reg);
256}
257
258static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
259 unsigned long val, unsigned long reg)
260{
261 if (!tspi->clk_state)
262 BUG();
263 writel(val, tspi->base + reg);
264}
265
266static void spi_tegra_clear_status(struct spi_tegra_data *tspi)
267{
268 unsigned long val;
269 unsigned long val_write = 0;
270
271 val = spi_tegra_readl(tspi, SLINK_STATUS);
272
273 val_write = SLINK_RDY;
274 if (val & SLINK_TX_OVF)
275 val_write |= SLINK_TX_OVF;
276 if (val & SLINK_RX_OVF)
277 val_write |= SLINK_RX_OVF;
278 if (val & SLINK_RX_UNF)
279 val_write |= SLINK_RX_UNF;
280 if (val & SLINK_TX_UNF)
281 val_write |= SLINK_TX_UNF;
282
283 spi_tegra_writel(tspi, val_write, SLINK_STATUS);
284}
285
286static unsigned long spi_tegra_get_packed_size(struct spi_tegra_data *tspi,
287 struct spi_transfer *t)
288{
289 unsigned long val;
290
291 switch (tspi->bytes_per_word) {
292 case 0:
293 val = SLINK_PACK_SIZE_4;
294 break;
295 case 1:
296 val = SLINK_PACK_SIZE_8;
297 break;
298 case 2:
299 val = SLINK_PACK_SIZE_16;
300 break;
301 case 4:
302 val = SLINK_PACK_SIZE_32;
303 break;
304 default:
305 val = 0;
306 }
307 return val;
308}
309
310static unsigned spi_tegra_calculate_curr_xfer_param(
311 struct spi_device *spi, struct spi_tegra_data *tspi,
312 struct spi_transfer *t)
313{
314 unsigned remain_len = t->len - tspi->cur_pos;
315 unsigned max_word;
316	unsigned bits_per_word;
317 unsigned max_len;
318 unsigned total_fifo_words;
319
320 bits_per_word = t->bits_per_word ? t->bits_per_word :
321 spi->bits_per_word;
322 tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
323
324 if (bits_per_word == 8 || bits_per_word == 16) {
325 tspi->is_packed = 1;
326 tspi->words_per_32bit = 32/bits_per_word;
327 } else {
328 tspi->is_packed = 0;
329 tspi->words_per_32bit = 1;
330 }
331 tspi->packed_size = spi_tegra_get_packed_size(tspi, t);
332
333 if (tspi->is_packed) {
334 max_len = min(remain_len, tspi->max_buf_size);
335 tspi->curr_dma_words = max_len/tspi->bytes_per_word;
336 total_fifo_words = remain_len/4;
337 } else {
338 max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
339 max_word = min(max_word, tspi->max_buf_size/4);
340 tspi->curr_dma_words = max_word;
341 total_fifo_words = remain_len/tspi->bytes_per_word;
342 }
343 return total_fifo_words;
344}
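/*
 * Worked example (illustrative, assuming the default DMA buffer size):
 * an 8 bits-per-word transfer with t->len = 100 gives bytes_per_word = 1,
 * packed mode with words_per_32bit = 4, curr_dma_words = 100 and
 * total_fifo_words = 25; 25 <= SPI_FIFO_DEPTH, so the CPU-based path
 * would be chosen for it.
 */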
345
346static unsigned spi_tegra_fill_tx_fifo_from_client_txbuf(
347 struct spi_tegra_data *tspi, struct spi_transfer *t)
348{
349 unsigned nbytes;
350 unsigned tx_empty_count;
351 unsigned long fifo_status;
352 u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
353 unsigned max_n_32bit;
354 unsigned i, count;
355 unsigned long x;
356 unsigned int written_words;
357
358 fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
359 tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
360
361 if (tspi->is_packed) {
362 nbytes = tspi->curr_dma_words * tspi->bytes_per_word;
363 max_n_32bit = (min(nbytes, tx_empty_count*4) - 1)/4 + 1;
364 for (count = 0; count < max_n_32bit; ++count) {
365 x = 0;
366 for (i = 0; (i < 4) && nbytes; i++, nbytes--)
367 x |= (*tx_buf++) << (i*8);
368 spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
369 }
370 written_words = min(max_n_32bit * tspi->words_per_32bit,
371 tspi->curr_dma_words);
372 } else {
373 max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
374 nbytes = max_n_32bit * tspi->bytes_per_word;
375 for (count = 0; count < max_n_32bit; ++count) {
376 x = 0;
377 for (i = 0; nbytes && (i < tspi->bytes_per_word);
378 ++i, nbytes--)
379 x |= ((*tx_buf++) << i*8);
380 spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
381 }
382 written_words = max_n_32bit;
383 }
384 tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
385 return written_words;
386}
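/*
 * Packing example (illustrative): with 8-bit words, the client bytes
 * { 0xAA, 0xBB, 0xCC, 0xDD } are packed least-significant-byte first
 * into a single FIFO write of 0xDDCCBBAA, i.e. four client words per
 * 32-bit FIFO entry.
 */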
387
388static unsigned int spi_tegra_read_rx_fifo_to_client_rxbuf(
389 struct spi_tegra_data *tspi, struct spi_transfer *t)
390{
391 unsigned rx_full_count;
392 unsigned long fifo_status;
393 u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
394 unsigned i, count;
395 unsigned long x;
396 unsigned int read_words = 0;
397 unsigned len;
398
399 fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
400 rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
401 dev_dbg(&tspi->pdev->dev, "Rx fifo count %d\n", rx_full_count);
402 if (tspi->is_packed) {
403 len = tspi->curr_dma_words * tspi->bytes_per_word;
404 for (count = 0; count < rx_full_count; ++count) {
405 x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
406 for (i = 0; len && (i < 4); ++i, len--)
407 *rx_buf++ = (x >> i*8) & 0xFF;
408 }
409 tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
410 read_words += tspi->curr_dma_words;
411 } else {
412 unsigned int rx_mask, bits_per_word;
413
414 bits_per_word = t->bits_per_word ? t->bits_per_word :
415 tspi->cur_spi->bits_per_word;
416 rx_mask = (1 << bits_per_word) - 1;
417 for (count = 0; count < rx_full_count; ++count) {
418 x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
419 x &= rx_mask;
420 for (i = 0; (i < tspi->bytes_per_word); ++i)
421 *rx_buf++ = (x >> (i*8)) & 0xFF;
422 }
423 tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
424 read_words += rx_full_count;
425 }
426 return read_words;
427}
428
429static void spi_tegra_copy_client_txbuf_to_spi_txbuf(
430 struct spi_tegra_data *tspi, struct spi_transfer *t)
431{
432 unsigned len;
433
434	/* Make the DMA buffer readable by the CPU */
435 dma_sync_single_for_cpu(&tspi->pdev->dev, tspi->tx_buf_phys,
436 tspi->dma_buf_size, DMA_FROM_DEVICE);
437 if (tspi->is_packed) {
438 len = tspi->curr_dma_words * tspi->bytes_per_word;
439 memcpy(tspi->tx_buf, t->tx_buf + tspi->cur_pos, len);
440 } else {
441 unsigned int i;
442 unsigned int count;
443 u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
444 unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
445 unsigned int x;
446
447 for (count = 0; count < tspi->curr_dma_words; ++count) {
448 x = 0;
449 for (i = 0; consume && (i < tspi->bytes_per_word);
450 ++i, consume--)
451 x |= ((*tx_buf++) << i*8);
452 tspi->tx_buf[count] = x;
453 }
454 }
455 tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
456	/* Make the DMA buffer readable by the DMA engine */
457 dma_sync_single_for_device(&tspi->pdev->dev, tspi->tx_buf_phys,
458 tspi->dma_buf_size, DMA_TO_DEVICE);
459}
460
461static void spi_tegra_copy_spi_rxbuf_to_client_rxbuf(
462 struct spi_tegra_data *tspi, struct spi_transfer *t)
463{
464 unsigned len;
465
466	/* Make the DMA buffer readable by the CPU */
467 dma_sync_single_for_cpu(&tspi->pdev->dev, tspi->rx_buf_phys,
468 tspi->dma_buf_size, DMA_FROM_DEVICE);
469
470 if (tspi->is_packed) {
471 len = tspi->curr_dma_words * tspi->bytes_per_word;
472 memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_buf, len);
473 } else {
474 unsigned int i;
475 unsigned int count;
476 unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
477 unsigned int x;
478 unsigned int rx_mask, bits_per_word;
479
480 bits_per_word = t->bits_per_word ? t->bits_per_word :
481 tspi->cur_spi->bits_per_word;
482 rx_mask = (1 << bits_per_word) - 1;
483 for (count = 0; count < tspi->curr_dma_words; ++count) {
484 x = tspi->rx_buf[count];
485 x &= rx_mask;
486 for (i = 0; (i < tspi->bytes_per_word); ++i)
487 *rx_buf++ = (x >> (i*8)) & 0xFF;
488 }
489 }
490 tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
491
492	/* Make the DMA buffer readable by the DMA engine */
493 dma_sync_single_for_device(&tspi->pdev->dev, tspi->rx_buf_phys,
494 tspi->dma_buf_size, DMA_TO_DEVICE);
495}
496
497static int spi_tegra_start_dma_based_transfer(
498 struct spi_tegra_data *tspi, struct spi_transfer *t)
499{
500 unsigned long val;
501 unsigned long test_val;
502 unsigned int len;
503 int ret = 0;
504
505 INIT_COMPLETION(tspi->rx_dma_complete);
506 INIT_COMPLETION(tspi->tx_dma_complete);
507
508	/* Make sure that the Rx and Tx FIFOs are empty */
509	test_val = spi_tegra_readl(tspi, SLINK_STATUS);
510	if (((test_val >> 20) & 0xF) != 0xA)
511		dev_err(&tspi->pdev->dev,
512			"The Rx and Tx FIFOs are not empty, status 0x%08lx\n",
513			test_val);
514
515 val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
516 val |= tspi->packed_size;
517 if (tspi->is_packed)
518 len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
519 4) * 4;
520 else
521 len = tspi->curr_dma_words * 4;
522
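	/*
	 * Pick the FIFO trigger level from the transfer length, e.g.
	 * (illustratively) len = 100 selects trigger level 1, len = 48
	 * selects 4 and len = 64 selects 8.
	 */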
523 if (len & 0xF)
524 val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
525 else if (((len) >> 4) & 0x1)
526 val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
527 else
528 val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
529
530 if (tspi->cur_direction & DATA_DIR_TX)
531 val |= SLINK_IE_TXC;
532
533 if (tspi->cur_direction & DATA_DIR_RX)
534 val |= SLINK_IE_RXC;
535
536 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
537 tspi->dma_control_reg = val;
538
539 if (tspi->cur_direction & DATA_DIR_TX) {
540 spi_tegra_copy_client_txbuf_to_spi_txbuf(tspi, t);
541 wmb();
542		/* Make the DMA buffer readable by the DMA engine */
543 dma_sync_single_for_device(&tspi->pdev->dev, tspi->tx_buf_phys,
544 tspi->dma_buf_size, DMA_TO_DEVICE);
545 tspi->tx_dma_req.size = len;
546 ret = tegra_dma_enqueue_req(tspi->tx_dma, &tspi->tx_dma_req);
547 if (ret < 0) {
548 dev_err(&tspi->pdev->dev,
549 "Error in starting tx dma error = %d\n", ret);
550 return ret;
551 }
552
553		/* Wait for the Tx FIFO to fill before starting the SLINK */
554 test_val = spi_tegra_readl(tspi, SLINK_STATUS);
555 while (!(test_val & SLINK_TX_FULL))
556 test_val = spi_tegra_readl(tspi, SLINK_STATUS);
557 }
558
559 if (tspi->cur_direction & DATA_DIR_RX) {
560		/* Make the DMA buffer readable by the DMA engine */
561 dma_sync_single_for_device(&tspi->pdev->dev, tspi->rx_buf_phys,
562 tspi->dma_buf_size, DMA_TO_DEVICE);
563 tspi->rx_dma_req.size = len;
564 ret = tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
565 if (ret < 0) {
566 dev_err(&tspi->pdev->dev,
567 "Error in starting rx dma error = %d\n", ret);
568 if (tspi->cur_direction & DATA_DIR_TX)
569 tegra_dma_dequeue_req(tspi->tx_dma,
570 &tspi->tx_dma_req);
571 return ret;
572 }
573 }
574 tspi->is_curr_dma_xfer = true;
575 if (tspi->is_packed) {
576 val |= SLINK_PACKED;
577 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
578 udelay(1);
579 wmb();
580 }
581
582 val |= SLINK_DMA_EN;
583 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
584 return ret;
585}
586
587static int spi_tegra_start_cpu_based_transfer(
588 struct spi_tegra_data *tspi, struct spi_transfer *t)
589{
590 unsigned long val;
591 unsigned curr_words;
592
593 val = tspi->packed_size;
594 if (tspi->cur_direction & DATA_DIR_TX)
595 val |= SLINK_IE_TXC;
596
597 if (tspi->cur_direction & DATA_DIR_RX)
598 val |= SLINK_IE_RXC;
599
600 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
601 tspi->dma_control_reg = val;
602
603 if (tspi->cur_direction & DATA_DIR_TX)
604 curr_words = spi_tegra_fill_tx_fifo_from_client_txbuf(tspi, t);
605 else
606 curr_words = tspi->curr_dma_words;
607 val |= SLINK_DMA_BLOCK_SIZE(curr_words - 1);
608 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
609 tspi->dma_control_reg = val;
610
611 tspi->is_curr_dma_xfer = false;
612 if (tspi->is_packed) {
613 val |= SLINK_PACKED;
614 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
615 udelay(1);
616 wmb();
617 }
618 val |= SLINK_DMA_EN;
619 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
620 return 0;
621}
622
623static void set_best_clk_source(struct spi_tegra_data *tspi,
624 unsigned long speed)
625{
626 long new_rate;
627 unsigned long err_rate;
628 int rate = speed * 4;
629 unsigned int fin_err = speed * 4;
630 int final_index = -1;
631 int count;
632 int ret;
633 struct clk *pclk;
634 unsigned long prate, crate, nrate;
635 unsigned long cdiv;
636
637 if (!tspi->parent_clk_count || !tspi->parent_clk_list)
638 return;
639
640	/* make sure the divisor is at least min_div */
641 pclk = clk_get_parent(tspi->clk);
642 prate = clk_get_rate(pclk);
643 crate = clk_get_rate(tspi->clk);
644 cdiv = DIV_ROUND_UP(prate, crate);
645 if (cdiv < tspi->min_div) {
646 nrate = DIV_ROUND_UP(prate, tspi->min_div);
647 clk_set_rate(tspi->clk, nrate);
648 }
649
650 for (count = 0; count < tspi->parent_clk_count; ++count) {
651 if (!tspi->parent_clk_list[count].parent_clk)
652 continue;
653 ret = clk_set_parent(tspi->clk,
654 tspi->parent_clk_list[count].parent_clk);
655 if (ret < 0) {
656 dev_warn(&tspi->pdev->dev,
657 "Error in setting parent clk src %s\n",
658 tspi->parent_clk_list[count].name);
659 continue;
660 }
661
662 new_rate = clk_round_rate(tspi->clk, rate);
663 if (new_rate < 0)
664 continue;
665
666 err_rate = abs(new_rate - rate);
667 if (err_rate < fin_err) {
668 final_index = count;
669 fin_err = err_rate;
670 }
671 }
672
673 if (final_index >= 0) {
674 dev_info(&tspi->pdev->dev, "Setting clk_src %s\n",
675 tspi->parent_clk_list[final_index].name);
676 clk_set_parent(tspi->clk,
677 tspi->parent_clk_list[final_index].parent_clk);
678 }
679}
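/*
 * Illustrative outcome (hypothetical rates): with candidate parents at
 * 408 MHz and 216 MHz and a requested bus speed of 25 MHz (so
 * rate = 100 MHz), the parent whose clk_round_rate() result lands
 * closest to 100 MHz is kept.
 */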
680
681static void spi_tegra_start_transfer(struct spi_device *spi,
682 struct spi_transfer *t, bool is_first_of_msg,
683 bool is_single_xfer)
684{
685 struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
686 u32 speed;
687 u8 bits_per_word;
688 unsigned total_fifo_words;
689 int ret;
690 struct tegra_spi_device_controller_data *cdata = spi->controller_data;
691 unsigned long command;
692 unsigned long command2;
693#ifndef CONFIG_ARCH_TEGRA_2x_SOC
694 unsigned long status2;
695#endif
696 int cs_setup_count;
697 int cs_hold_count;
698
699 unsigned int cs_pol_bit[] = {
700 SLINK_CS_POLARITY,
701 SLINK_CS_POLARITY1,
702 SLINK_CS_POLARITY2,
703 SLINK_CS_POLARITY3,
704 };
705
706 bits_per_word = t->bits_per_word ? t->bits_per_word :
707 spi->bits_per_word;
708
709 speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
710 if (speed != tspi->cur_speed) {
711 set_best_clk_source(tspi, speed);
712 clk_set_rate(tspi->clk, speed * 4);
713 tspi->cur_speed = speed;
714 }
715
716 tspi->cur = t;
717 tspi->cur_spi = spi;
718 tspi->cur_pos = 0;
719 tspi->cur_rx_pos = 0;
720 tspi->cur_tx_pos = 0;
721 tspi->rx_complete = 0;
722 tspi->tx_complete = 0;
723 total_fifo_words = spi_tegra_calculate_curr_xfer_param(spi, tspi, t);
724
725 command2 = tspi->def_command2_reg;
726 if (is_first_of_msg) {
727 if (!tspi->is_clkon_always) {
728 if (!tspi->clk_state) {
729 pm_runtime_get_sync(&tspi->pdev->dev);
730 tspi->clk_state = 1;
731 }
732 }
733
734 spi_tegra_clear_status(tspi);
735
736 command = tspi->def_command_reg;
737 command |= SLINK_BIT_LENGTH(bits_per_word - 1);
738
739 /* possibly use the hw based chip select */
740 tspi->is_hw_based_cs = false;
741 if (cdata && cdata->is_hw_based_cs && is_single_xfer) {
742 if ((tspi->curr_dma_words * tspi->bytes_per_word) ==
743 (t->len - tspi->cur_pos)) {
744 cs_setup_count = cdata->cs_setup_clk_count >> 1;
745 if (cs_setup_count > 3)
746 cs_setup_count = 3;
747 cs_hold_count = cdata->cs_hold_clk_count;
748 if (cs_hold_count > 0xF)
749 cs_hold_count = 0xF;
750 tspi->is_hw_based_cs = true;
751
752 command &= ~SLINK_CS_SW;
753 command2 &= ~SLINK_SS_SETUP(3);
754 command2 |= SLINK_SS_SETUP(cs_setup_count);
755#ifndef CONFIG_ARCH_TEGRA_2x_SOC
756 status2 = spi_tegra_readl(tspi, SLINK_STATUS2);
757 status2 &= ~SLINK_SS_HOLD_TIME(0xF);
758 status2 |= SLINK_SS_HOLD_TIME(cs_hold_count);
759 spi_tegra_writel(tspi, status2, SLINK_STATUS2);
760#endif
761 }
762 }
763 if (!tspi->is_hw_based_cs) {
764 command |= SLINK_CS_SW;
765 command ^= cs_pol_bit[spi->chip_select];
766 }
767
768 command &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA;
769 if (spi->mode & SPI_CPHA)
770 command |= SLINK_CK_SDA;
771
772 if (spi->mode & SPI_CPOL)
773 command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
774 else
775 command |= SLINK_IDLE_SCLK_DRIVE_LOW;
776 } else {
777 command = tspi->command_reg;
778 command &= ~SLINK_BIT_LENGTH(~0);
779 command |= SLINK_BIT_LENGTH(bits_per_word - 1);
780 }
781
782 spi_tegra_writel(tspi, command, SLINK_COMMAND);
783 tspi->command_reg = command;
784
785 dev_dbg(&tspi->pdev->dev, "The def 0x%x and written 0x%lx\n",
786 tspi->def_command_reg, command);
787
788 command2 &= ~(SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN);
789 tspi->cur_direction = 0;
790 if (t->rx_buf) {
791 command2 |= SLINK_RXEN;
792 tspi->cur_direction |= DATA_DIR_RX;
793 }
794 if (t->tx_buf) {
795 command2 |= SLINK_TXEN;
796 tspi->cur_direction |= DATA_DIR_TX;
797 }
798 command2 |= SLINK_SS_EN_CS(spi->chip_select);
799 spi_tegra_writel(tspi, command2, SLINK_COMMAND2);
800 tspi->command2_reg = command2;
801
802 if (total_fifo_words > SPI_FIFO_DEPTH)
803 ret = spi_tegra_start_dma_based_transfer(tspi, t);
804 else
805 ret = spi_tegra_start_cpu_based_transfer(tspi, t);
806 WARN_ON(ret < 0);
807}
808
809static int spi_tegra_setup(struct spi_device *spi)
810{
811 struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
812 unsigned long cs_bit;
813 unsigned long val;
814 unsigned long flags;
815
816 dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
817 spi->bits_per_word,
818 spi->mode & SPI_CPOL ? "" : "~",
819 spi->mode & SPI_CPHA ? "" : "~",
820 spi->max_speed_hz);
821
822 BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
823 switch (spi->chip_select) {
824 case 0:
825 cs_bit = SLINK_CS_POLARITY;
826 break;
827
828 case 1:
829 cs_bit = SLINK_CS_POLARITY1;
830 break;
831
832 case 2:
833 cs_bit = SLINK_CS_POLARITY2;
834 break;
835
836 case 3:
837 cs_bit = SLINK_CS_POLARITY3;
838 break;
839
840 default:
841 return -EINVAL;
842 }
843
844 spin_lock_irqsave(&tspi->lock, flags);
845 val = tspi->def_command_reg;
846 if (spi->mode & SPI_CS_HIGH)
847 val |= cs_bit;
848 else
849 val &= ~cs_bit;
850 tspi->def_command_reg = val;
851
852 if (!tspi->is_clkon_always && !tspi->clk_state) {
853 spin_unlock_irqrestore(&tspi->lock, flags);
854 pm_runtime_get_sync(&tspi->pdev->dev);
855 spin_lock_irqsave(&tspi->lock, flags);
856 tspi->clk_state = 1;
857 }
858 spi_tegra_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
859 if (!tspi->is_clkon_always && tspi->clk_state) {
860 tspi->clk_state = 0;
861 spin_unlock_irqrestore(&tspi->lock, flags);
862 pm_runtime_put_sync(&tspi->pdev->dev);
863 } else
864 spin_unlock_irqrestore(&tspi->lock, flags);
865 return 0;
866}
867
868static void tegra_spi_transfer_work(struct work_struct *work)
869{
870 struct spi_tegra_data *tspi;
871 struct spi_device *spi;
872 struct spi_message *m;
873 struct spi_transfer *t;
874 int single_xfer = 0;
875 unsigned long flags;
876
877 tspi = container_of(work, struct spi_tegra_data, spi_transfer_work);
878
879 spin_lock_irqsave(&tspi->lock, flags);
880
881 if (tspi->is_transfer_in_progress || tspi->is_suspended) {
882 spin_unlock_irqrestore(&tspi->lock, flags);
883 return;
884 }
885 if (list_empty(&tspi->queue)) {
886 spin_unlock_irqrestore(&tspi->lock, flags);
887 return;
888 }
889
890 m = list_first_entry(&tspi->queue, struct spi_message, queue);
891 spi = m->state;
892 single_xfer = list_is_singular(&m->transfers);
893 m->actual_length = 0;
894 m->status = 0;
895 t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
896 tspi->is_transfer_in_progress = true;
897
898 spin_unlock_irqrestore(&tspi->lock, flags);
899 spi_tegra_start_transfer(spi, t, true, single_xfer);
900}
901
902static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
903{
904 struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
905 struct spi_transfer *t;
906 unsigned long flags;
907 int was_empty;
908 int bytes_per_word;
909
910 if (list_empty(&m->transfers) || !m->complete)
911 return -EINVAL;
912
913 list_for_each_entry(t, &m->transfers, transfer_list) {
914 if (t->bits_per_word < 0 || t->bits_per_word > 32)
915 return -EINVAL;
916
917 if (t->len == 0)
918 return -EINVAL;
919
920		/* Check that the length is a whole number of words */
921 if (t->bits_per_word)
922 bytes_per_word = (t->bits_per_word + 7)/8;
923 else
924 bytes_per_word = (spi->bits_per_word + 7)/8;
925
926 if (t->len % bytes_per_word != 0)
927 return -EINVAL;
928
929 if (!t->rx_buf && !t->tx_buf)
930 return -EINVAL;
931 }
932
933 spin_lock_irqsave(&tspi->lock, flags);
934
935 if (WARN_ON(tspi->is_suspended)) {
936 spin_unlock_irqrestore(&tspi->lock, flags);
937 return -EBUSY;
938 }
939
940 m->state = spi;
941 was_empty = list_empty(&tspi->queue);
942 list_add_tail(&m->queue, &tspi->queue);
943 if (was_empty)
944 queue_work(tspi->spi_workqueue, &tspi->spi_transfer_work);
945
946 spin_unlock_irqrestore(&tspi->lock, flags);
947 return 0;
948}
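/*
 * Client-side sketch (illustrative; my_complete and ctx are hypothetical):
 * a protocol driver queues a message through the SPI core, which lands in
 * spi_tegra_transfer() above; note that m.complete must be set or the
 * message is rejected with -EINVAL:
 *
 *	struct spi_transfer t = { .tx_buf = buf, .len = len };
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	m.complete = my_complete;
 *	m.context = ctx;
 *	spi_async(spi, &m);
 */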
949
950static void spi_tegra_curr_transfer_complete(struct spi_tegra_data *tspi,
951 unsigned err, unsigned cur_xfer_size, unsigned long *irq_flags)
952{
953 struct spi_message *m;
954 struct spi_device *spi;
955 struct spi_transfer *t;
956 int single_xfer = 0;
957
958	/* Check if CS needs to be toggled here */
959 if (tspi->cur && tspi->cur->cs_change &&
960 tspi->cur->delay_usecs) {
961 udelay(tspi->cur->delay_usecs);
962 }
963
964 m = list_first_entry(&tspi->queue, struct spi_message, queue);
965 if (err)
966 m->status = -EIO;
967 spi = m->state;
968
969 m->actual_length += cur_xfer_size;
970
971 if (!list_is_last(&tspi->cur->transfer_list, &m->transfers)) {
972 tspi->cur = list_first_entry(&tspi->cur->transfer_list,
973 struct spi_transfer, transfer_list);
974 spin_unlock_irqrestore(&tspi->lock, *irq_flags);
975 spi_tegra_start_transfer(spi, tspi->cur, false, 0);
976 spin_lock_irqsave(&tspi->lock, *irq_flags);
977 } else {
978 list_del(&m->queue);
979 m->complete(m->context);
980 if (!list_empty(&tspi->queue)) {
981 if (tspi->is_suspended) {
982 spi_tegra_writel(tspi, tspi->def_command_reg,
983 SLINK_COMMAND);
984 spi_tegra_writel(tspi, tspi->def_command2_reg,
985 SLINK_COMMAND2);
986 tspi->is_transfer_in_progress = false;
987 return;
988 }
989 m = list_first_entry(&tspi->queue, struct spi_message,
990 queue);
991 spi = m->state;
992 single_xfer = list_is_singular(&m->transfers);
993 m->actual_length = 0;
994 m->status = 0;
995
996 t = list_first_entry(&m->transfers, struct spi_transfer,
997 transfer_list);
998 spin_unlock_irqrestore(&tspi->lock, *irq_flags);
999 spi_tegra_start_transfer(spi, t, true, single_xfer);
1000 spin_lock_irqsave(&tspi->lock, *irq_flags);
1001 } else {
1002 spi_tegra_writel(tspi, tspi->def_command_reg,
1003 SLINK_COMMAND);
1004 spi_tegra_writel(tspi, tspi->def_command2_reg,
1005 SLINK_COMMAND2);
1006 if (!tspi->is_clkon_always) {
1007 if (tspi->clk_state) {
1008				/* Provide a delay to stabilize
1009				 * the signal state */
1010 spin_unlock_irqrestore(&tspi->lock,
1011 *irq_flags);
1012 udelay(10);
1013 pm_runtime_put_sync(&tspi->pdev->dev);
1014 spin_lock_irqsave(&tspi->lock,
1015 *irq_flags);
1016 tspi->clk_state = 0;
1017 }
1018 }
1019 tspi->is_transfer_in_progress = false;
1020			/* Check if any new request arrived while
1021			 * the clock was disabled */
1022 queue_work(tspi->spi_workqueue,
1023 &tspi->spi_transfer_work);
1024 }
1025 }
1026 return;
1027}
1028
1029static void tegra_spi_tx_dma_complete(struct tegra_dma_req *req)
1030{
1031 struct spi_tegra_data *tspi = req->dev;
1032 complete(&tspi->tx_dma_complete);
1033}
1034
1035static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
1036{
1037 struct spi_tegra_data *tspi = req->dev;
1038 complete(&tspi->rx_dma_complete);
1039}
1040
1041static void handle_cpu_based_xfer(void *context_data)
1042{
1043 struct spi_tegra_data *tspi = context_data;
1044 struct spi_transfer *t = tspi->cur;
1045 unsigned long flags;
1046
1047 spin_lock_irqsave(&tspi->lock, flags);
1048 if (tspi->tx_status || tspi->rx_status ||
1049 (tspi->status_reg & SLINK_BSY)) {
1050 dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
1051 __func__, tspi->status_reg);
1052 tegra_periph_reset_assert(tspi->clk);
1053 udelay(2);
1054 tegra_periph_reset_deassert(tspi->clk);
1055 WARN_ON(1);
1056 spi_tegra_curr_transfer_complete(tspi,
1057 tspi->tx_status || tspi->rx_status, t->len, &flags);
1058 goto exit;
1059 }
1060
1061 dev_vdbg(&tspi->pdev->dev, "Current direction %x\n",
1062 tspi->cur_direction);
1063 if (tspi->cur_direction & DATA_DIR_RX)
1064 spi_tegra_read_rx_fifo_to_client_rxbuf(tspi, t);
1065
1066 if (tspi->cur_direction & DATA_DIR_TX)
1067 tspi->cur_pos = tspi->cur_tx_pos;
1068 else if (tspi->cur_direction & DATA_DIR_RX)
1069 tspi->cur_pos = tspi->cur_rx_pos;
1070 else
1071 WARN_ON(1);
1072
1073 dev_vdbg(&tspi->pdev->dev,
1074 "current position %d and length of the transfer %d\n",
1075 tspi->cur_pos, t->len);
1076 if (tspi->cur_pos == t->len) {
1077 spi_tegra_curr_transfer_complete(tspi,
1078 tspi->tx_status || tspi->rx_status, t->len, &flags);
1079 goto exit;
1080 }
1081
1082 spi_tegra_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
1083 spi_tegra_start_cpu_based_transfer(tspi, t);
1084exit:
1085 spin_unlock_irqrestore(&tspi->lock, flags);
1086 return;
1087}
1088
1089static irqreturn_t spi_tegra_isr_thread(int irq, void *context_data)
1090{
1091 struct spi_tegra_data *tspi = context_data;
1092 struct spi_transfer *t = tspi->cur;
1093 long wait_status;
1094 int err = 0;
1095 unsigned total_fifo_words;
1096 unsigned long flags;
1097
1098 if (!tspi->is_curr_dma_xfer) {
1099 handle_cpu_based_xfer(context_data);
1100 return IRQ_HANDLED;
1101 }
1102
1103 /* Abort dmas if any error */
1104 if (tspi->cur_direction & DATA_DIR_TX) {
1105 if (tspi->tx_status) {
1106 tegra_dma_dequeue_req(tspi->tx_dma, &tspi->tx_dma_req);
1107 err += 1;
1108 } else {
1109 wait_status = wait_for_completion_interruptible_timeout(
1110 &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
1111 if (wait_status <= 0) {
1112 tegra_dma_dequeue_req(tspi->tx_dma,
1113 &tspi->tx_dma_req);
1114 dev_err(&tspi->pdev->dev,
1115 "Error in Dma Tx transfer\n");
1116 err += 1;
1117 }
1118 }
1119 }
1120
1121 if (tspi->cur_direction & DATA_DIR_RX) {
1122 if (tspi->rx_status) {
1123 tegra_dma_dequeue_req(tspi->rx_dma, &tspi->rx_dma_req);
1124 err += 2;
1125 } else {
1126 wait_status = wait_for_completion_interruptible_timeout(
1127 &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
1128 if (wait_status <= 0) {
1129 tegra_dma_dequeue_req(tspi->rx_dma,
1130 &tspi->rx_dma_req);
1131 dev_err(&tspi->pdev->dev,
1132 "Error in Dma Rx transfer\n");
1133 err += 2;
1134 }
1135 }
1136 }
1137
1138 spin_lock_irqsave(&tspi->lock, flags);
1139 if (err) {
1140 dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
1141 __func__, tspi->status_reg);
1142 tegra_periph_reset_assert(tspi->clk);
1143 udelay(2);
1144 tegra_periph_reset_deassert(tspi->clk);
1145 WARN_ON(1);
1146 spi_tegra_curr_transfer_complete(tspi, err, t->len, &flags);
1147 spin_unlock_irqrestore(&tspi->lock, flags);
1148 return IRQ_HANDLED;
1149 }
1150
1151 if (tspi->cur_direction & DATA_DIR_RX)
1152 spi_tegra_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
1153
1154 if (tspi->cur_direction & DATA_DIR_TX)
1155 tspi->cur_pos = tspi->cur_tx_pos;
1156 else if (tspi->cur_direction & DATA_DIR_RX)
1157 tspi->cur_pos = tspi->cur_rx_pos;
1158 else
1159 WARN_ON(1);
1160
1161 if (tspi->cur_pos == t->len) {
1162 spi_tegra_curr_transfer_complete(tspi,
1163 tspi->tx_status || tspi->rx_status, t->len, &flags);
1164 spin_unlock_irqrestore(&tspi->lock, flags);
1165 return IRQ_HANDLED;
1166 }
1167
1168 /* Continue transfer in current message */
1169 total_fifo_words = spi_tegra_calculate_curr_xfer_param(tspi->cur_spi,
1170 tspi, t);
1171 if (total_fifo_words > SPI_FIFO_DEPTH)
1172 err = spi_tegra_start_dma_based_transfer(tspi, t);
1173 else
1174 err = spi_tegra_start_cpu_based_transfer(tspi, t);
1175
1176 spin_unlock_irqrestore(&tspi->lock, flags);
1177 WARN_ON(err < 0);
1178 return IRQ_HANDLED;
1179}
1180
1181static irqreturn_t spi_tegra_isr(int irq, void *context_data)
1182{
1183 struct spi_tegra_data *tspi = context_data;
1184
1185 tspi->status_reg = spi_tegra_readl(tspi, SLINK_STATUS);
1186 if (tspi->cur_direction & DATA_DIR_TX)
1187 tspi->tx_status = tspi->status_reg &
1188 (SLINK_TX_OVF | SLINK_TX_UNF);
1189
1190 if (tspi->cur_direction & DATA_DIR_RX)
1191 tspi->rx_status = tspi->status_reg &
1192 (SLINK_RX_OVF | SLINK_RX_UNF);
1193 spi_tegra_clear_status(tspi);
1194
1195
1196 return IRQ_WAKE_THREAD;
1197}
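/*
 * Two-stage interrupt handling: the hard handler above only latches
 * SLINK_STATUS into tspi->status_reg and clears it, then returns
 * IRQ_WAKE_THREAD so spi_tegra_isr_thread() (registered via
 * request_threaded_irq() in probe) can do the FIFO/DMA work in a
 * sleepable context.
 */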
1198
1199static int __init spi_tegra_probe(struct platform_device *pdev)
1200{
1201 struct spi_master *master;
1202 struct spi_tegra_data *tspi;
1203 struct resource *r;
1204 struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
1205 int ret, spi_irq;
1206 int i;
1207 char spi_wq_name[20];
1208
1209 master = spi_alloc_master(&pdev->dev, sizeof *tspi);
1210 if (master == NULL) {
1211 dev_err(&pdev->dev, "master allocation failed\n");
1212 return -ENOMEM;
1213 }
1214
1215 /* the spi->mode bits understood by this driver: */
1216 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1217
1218 if (pdev->id != -1)
1219 master->bus_num = pdev->id;
1220
1221 master->setup = spi_tegra_setup;
1222 master->transfer = spi_tegra_transfer;
1223 master->num_chipselect = MAX_CHIP_SELECT;
1224
1225 dev_set_drvdata(&pdev->dev, master);
1226 tspi = spi_master_get_devdata(master);
1227 tspi->master = master;
1228 tspi->pdev = pdev;
1229 tspi->is_transfer_in_progress = false;
1230 tspi->is_suspended = false;
1231 spin_lock_init(&tspi->lock);
1232
1233 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1234 if (r == NULL) {
1235 ret = -ENODEV;
1236 goto fail_no_mem;
1237 }
1238
1239 if (!request_mem_region(r->start, resource_size(r),
1240 dev_name(&pdev->dev))) {
1241 ret = -EBUSY;
1242 goto fail_no_mem;
1243 }
1244
1245 tspi->phys = r->start;
1246 tspi->base = ioremap(r->start, resource_size(r));
1247 if (!tspi->base) {
1248 dev_err(&pdev->dev, "can't ioremap iomem\n");
1249 ret = -ENOMEM;
1250 goto fail_io_map;
1251 }
1252
1253 spi_irq = platform_get_irq(pdev, 0);
1254 if (unlikely(spi_irq < 0)) {
1255 dev_err(&pdev->dev, "can't find irq resource\n");
1256 ret = -ENXIO;
1257 goto fail_irq_req;
1258 }
1259 tspi->irq = spi_irq;
1260
1261 sprintf(tspi->port_name, "tegra_spi_%d", pdev->id);
1262 ret = request_threaded_irq(tspi->irq, spi_tegra_isr,
1263 spi_tegra_isr_thread, IRQF_DISABLED,
1264 tspi->port_name, tspi);
1265 if (ret < 0) {
1266 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
1267 tspi->irq);
1268 goto fail_irq_req;
1269 }
1270
1271 tspi->clk = clk_get(&pdev->dev, "spi");
1272 if (IS_ERR(tspi->clk)) {
1273 dev_err(&pdev->dev, "can not get clock\n");
1274 ret = PTR_ERR(tspi->clk);
1275 goto fail_clk_get;
1276 }
1277
1278 tspi->sclk = clk_get(&pdev->dev, "sclk");
1279 if (IS_ERR(tspi->sclk)) {
1280 dev_err(&pdev->dev, "can not get sclock\n");
1281 ret = PTR_ERR(tspi->sclk);
1282 goto fail_sclk_get;
1283 }
1284
1285 INIT_LIST_HEAD(&tspi->queue);
1286
1287 if (pdata) {
1288 tspi->is_clkon_always = pdata->is_clkon_always;
1289 tspi->is_dma_allowed = pdata->is_dma_based;
1290 tspi->dma_buf_size = (pdata->max_dma_buffer) ?
1291 pdata->max_dma_buffer : DEFAULT_SPI_DMA_BUF_LEN;
1292 tspi->parent_clk_count = pdata->parent_clk_count;
1293 tspi->parent_clk_list = pdata->parent_clk_list;
1294 tspi->max_rate = pdata->max_rate;
1295 } else {
1296 tspi->is_clkon_always = false;
1297 tspi->is_dma_allowed = true;
1298 tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
1299 tspi->parent_clk_count = 0;
1300 tspi->parent_clk_list = NULL;
1301 tspi->max_rate = 0;
1302 }
1303
1304 tspi->max_parent_rate = 0;
1305 tspi->min_div = 0;
1306
1307 if (tspi->parent_clk_count) {
1308 tspi->max_parent_rate = tspi->parent_clk_list[0].fixed_clk_rate;
1309 for (i = 1; i < tspi->parent_clk_count; ++i) {
1310 tspi->max_parent_rate = max(tspi->max_parent_rate,
1311 tspi->parent_clk_list[i].fixed_clk_rate);
1312 }
1313 if (tspi->max_rate)
1314 tspi->min_div = DIV_ROUND_UP(tspi->max_parent_rate,
1315 tspi->max_rate);
1316 }
1317 tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
1318
1319 if (!tspi->is_dma_allowed)
1320 goto skip_dma_alloc;
1321
1322 init_completion(&tspi->tx_dma_complete);
1323 init_completion(&tspi->rx_dma_complete);
1324
1325
1326 tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
1327 "spi_rx_%d", pdev->id);
1328 if (!tspi->rx_dma) {
1329		dev_err(&pdev->dev, "cannot allocate rx dma channel\n");
1330 ret = -ENODEV;
1331 goto fail_rx_dma_alloc;
1332 }
1333
1334 tspi->rx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
1335 &tspi->rx_buf_phys, GFP_KERNEL);
1336 if (!tspi->rx_buf) {
1337		dev_err(&pdev->dev, "cannot allocate rx bounce buffer\n");
1338 ret = -ENOMEM;
1339 goto fail_rx_buf_alloc;
1340 }
1341
1342	/* Flush the buffer so the DMA engine sees coherent data */
1343 dma_sync_single_for_device(&tspi->pdev->dev, tspi->rx_buf_phys,
1344 tspi->dma_buf_size, DMA_TO_DEVICE);
1345
1346 memset(&tspi->rx_dma_req, 0, sizeof(struct tegra_dma_req));
1347 tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
1348 tspi->rx_dma_req.to_memory = 1;
1349 tspi->rx_dma_req.dest_addr = tspi->rx_buf_phys;
1350 tspi->rx_dma_req.virt_addr = tspi->rx_buf;
1351 tspi->rx_dma_req.dest_bus_width = 32;
1352 tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO;
1353 tspi->rx_dma_req.source_bus_width = 32;
1354 tspi->rx_dma_req.source_wrap = 4;
1355 tspi->rx_dma_req.dest_wrap = 0;
1356 tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
1357 tspi->rx_dma_req.dev = tspi;
1358
1359 tspi->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
1360 "spi_tx_%d", pdev->id);
1361 if (!tspi->tx_dma) {
1362		dev_err(&pdev->dev, "cannot allocate tx dma channel\n");
1363 ret = -ENODEV;
1364 goto fail_tx_dma_alloc;
1365 }
1366
1367 tspi->tx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
1368 &tspi->tx_buf_phys, GFP_KERNEL);
1369 if (!tspi->tx_buf) {
1370		dev_err(&pdev->dev, "cannot allocate tx bounce buffer\n");
1371 ret = -ENOMEM;
1372 goto fail_tx_buf_alloc;
1373 }
1374
1375	/* Flush the buffer so the DMA engine sees coherent data */
1376 dma_sync_single_for_device(&tspi->pdev->dev, tspi->tx_buf_phys,
1377 tspi->dma_buf_size, DMA_TO_DEVICE);
1378
1379 memset(&tspi->tx_dma_req, 0, sizeof(struct tegra_dma_req));
1380 tspi->tx_dma_req.complete = tegra_spi_tx_dma_complete;
1381 tspi->tx_dma_req.to_memory = 0;
1382 tspi->tx_dma_req.dest_addr = tspi->phys + SLINK_TX_FIFO;
1383 tspi->tx_dma_req.virt_addr = tspi->tx_buf;
1384 tspi->tx_dma_req.dest_bus_width = 32;
1385 tspi->tx_dma_req.dest_wrap = 4;
1386 tspi->tx_dma_req.source_wrap = 0;
1387 tspi->tx_dma_req.source_addr = tspi->tx_buf_phys;
1388 tspi->tx_dma_req.source_bus_width = 32;
1389 tspi->tx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
1390 tspi->tx_dma_req.dev = tspi;
1391 tspi->max_buf_size = tspi->dma_buf_size;
1392 tspi->def_command_reg = SLINK_CS_SW | SLINK_M_S;
1393 tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
1394
1395skip_dma_alloc:
1396 pm_runtime_enable(&pdev->dev);
1397 pm_runtime_get_sync(&pdev->dev);
1398 tspi->clk_state = 1;
1399 master->dev.of_node = pdev->dev.of_node;
1400 ret = spi_register_master(master);
1401 if (!tspi->is_clkon_always) {
1402 if (tspi->clk_state) {
1403 pm_runtime_put_sync(&pdev->dev);
1404 tspi->clk_state = 0;
1405 }
1406 }
1407
1408 if (ret < 0) {
1409		dev_err(&pdev->dev, "cannot register SPI master, err %d\n", ret);
1410 goto fail_master_register;
1411 }
1412
1413	/* create the workqueue for deferred transfer work (kbc path) */
1414 snprintf(spi_wq_name, sizeof(spi_wq_name), "spi_tegra-%d", pdev->id);
1415 tspi->spi_workqueue = create_singlethread_workqueue(spi_wq_name);
1416 if (!tspi->spi_workqueue) {
1417 dev_err(&pdev->dev, "Failed to create work queue\n");
1418 ret = -ENODEV;
1419 goto fail_workqueue;
1420 }
1421
1422 INIT_WORK(&tspi->spi_transfer_work, tegra_spi_transfer_work);
1423
1424 return ret;
1425
1426fail_workqueue:
1427 spi_unregister_master(master);
1428
1429fail_master_register:
1430 if (tspi->tx_buf)
1431 dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
1432 tspi->tx_buf, tspi->tx_buf_phys);
1433fail_tx_buf_alloc:
1434 if (tspi->tx_dma)
1435 tegra_dma_free_channel(tspi->tx_dma);
1436fail_tx_dma_alloc:
1437 if (tspi->rx_buf)
1438 dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
1439 tspi->rx_buf, tspi->rx_buf_phys);
1440fail_rx_buf_alloc:
1441 if (tspi->rx_dma)
1442 tegra_dma_free_channel(tspi->rx_dma);
1443fail_rx_dma_alloc:
1444 pm_runtime_disable(&pdev->dev);
1445 clk_put(tspi->sclk);
1446fail_sclk_get:
1447 clk_put(tspi->clk);
1448fail_clk_get:
1449 free_irq(tspi->irq, tspi);
1450fail_irq_req:
1451 iounmap(tspi->base);
1452fail_io_map:
1453 release_mem_region(r->start, resource_size(r));
1454fail_no_mem:
1455 spi_master_put(master);
1456 return ret;
1457}
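
probe() unwinds errors with a label ladder: each failure jumps to the label that releases everything acquired so far, in reverse order of acquisition. Reduced to its shape, as an editorial sketch with hypothetical demo_* helpers:

static int demo_acquire_a(void);	/* e.g. request_mem_region() */
static int demo_acquire_b(void);	/* e.g. ioremap(), clk_get(), ... */
static void demo_release_a(void);

static int demo_probe_pattern(void)
{
	int ret;

	ret = demo_acquire_a();
	if (ret)
		goto fail_a;
	ret = demo_acquire_b();
	if (ret)
		goto fail_b;
	return 0;

fail_b:
	demo_release_a();		/* undo in reverse order */
fail_a:
	return ret;
}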
1458
1459static int __devexit spi_tegra_remove(struct platform_device *pdev)
1460{
1461 struct spi_master *master;
1462 struct spi_tegra_data *tspi;
1463 struct resource *r;
1464
1465 master = dev_get_drvdata(&pdev->dev);
1466 tspi = spi_master_get_devdata(master);
1467
1468 spi_unregister_master(master);
1469 if (tspi->tx_buf)
1470 dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
1471 tspi->tx_buf, tspi->tx_buf_phys);
1472 if (tspi->tx_dma)
1473 tegra_dma_free_channel(tspi->tx_dma);
1474 if (tspi->rx_buf)
1475 dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
1476 tspi->rx_buf, tspi->rx_buf_phys);
1477 if (tspi->rx_dma)
1478 tegra_dma_free_channel(tspi->rx_dma);
1479
1480 if (tspi->is_clkon_always) {
1481 pm_runtime_put_sync(&pdev->dev);
1482 tspi->clk_state = 0;
1483 }
1484
1485 pm_runtime_disable(&pdev->dev);
1486 clk_put(tspi->sclk);
1487 clk_put(tspi->clk);
1488 iounmap(tspi->base);
1489
1490 destroy_workqueue(tspi->spi_workqueue);
1491
1492 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1493 release_mem_region(r->start, resource_size(r));
1494
1495 return 0;
1496}
1497
1498#ifdef CONFIG_PM
1499static int spi_tegra_suspend(struct platform_device *pdev, pm_message_t state)
1500{
1501 struct spi_master *master;
1502 struct spi_tegra_data *tspi;
1503 unsigned limit = 50;
1504 unsigned long flags;
1505
1506 master = dev_get_drvdata(&pdev->dev);
1507 tspi = spi_master_get_devdata(master);
1508 spin_lock_irqsave(&tspi->lock, flags);
1509
1510	/* Wait for all queued transfers to complete */
1511	if (!list_empty(&tspi->queue))
1512		dev_warn(&pdev->dev, "Transfer list is not empty; "
1513			"waiting up to %d ms for transfers to complete\n",
1514			limit * 20);
1515
1516 while (!list_empty(&tspi->queue) && limit--) {
1517 spin_unlock_irqrestore(&tspi->lock, flags);
1518 msleep(20);
1519 spin_lock_irqsave(&tspi->lock, flags);
1520 }
1521
1522	/* Now wait only for the current transfer to complete */
1523 tspi->is_suspended = true;
1524 if (!list_empty(&tspi->queue)) {
1525 limit = 50;
1526		dev_err(&pdev->dev, "Not all transfers have completed; "
1527			"waiting up to %d ms for the current transfer\n",
1528 limit * 20);
1529 while (tspi->is_transfer_in_progress && limit--) {
1530 spin_unlock_irqrestore(&tspi->lock, flags);
1531 msleep(20);
1532 spin_lock_irqsave(&tspi->lock, flags);
1533 }
1534 }
1535
1536 if (tspi->is_transfer_in_progress) {
1537 dev_err(&pdev->dev,
1538			"SPI transfer is in progress; aborting suspend\n");
1539 tspi->is_suspended = false;
1540 spin_unlock_irqrestore(&tspi->lock, flags);
1541 return -EBUSY;
1542 }
1543
1544 spin_unlock_irqrestore(&tspi->lock, flags);
1545 if (tspi->is_clkon_always) {
1546 pm_runtime_put_sync(&pdev->dev);
1547 tspi->clk_state = 0;
1548 }
1549 return 0;
1550}
1551
1552static int spi_tegra_resume(struct platform_device *pdev)
1553{
1554 struct spi_master *master;
1555 struct spi_tegra_data *tspi;
1556 struct spi_message *m;
1557 struct spi_device *spi;
1558 struct spi_transfer *t = NULL;
1559 int single_xfer = 0;
1560 unsigned long flags;
1561
1562 master = dev_get_drvdata(&pdev->dev);
1563 tspi = spi_master_get_devdata(master);
1564
1565 pm_runtime_get_sync(&pdev->dev);
1566 tspi->clk_state = 1;
1567 spi_tegra_writel(tspi, tspi->command_reg, SLINK_COMMAND);
1568 if (!tspi->is_clkon_always) {
1569 pm_runtime_put_sync(&pdev->dev);
1570 tspi->clk_state = 0;
1571 }
1572 spin_lock_irqsave(&tspi->lock, flags);
1573
1574 tspi->cur_speed = 0;
1575 tspi->is_suspended = false;
1576 if (!list_empty(&tspi->queue)) {
1577 m = list_first_entry(&tspi->queue, struct spi_message, queue);
1578 spi = m->state;
1579 single_xfer = list_is_singular(&m->transfers);
1580 m->actual_length = 0;
1581 m->status = 0;
1582 t = list_first_entry(&m->transfers, struct spi_transfer,
1583 transfer_list);
1584 tspi->is_transfer_in_progress = true;
1585 }
1586 spin_unlock_irqrestore(&tspi->lock, flags);
1587 if (t)
1588 spi_tegra_start_transfer(spi, t, true, single_xfer);
1589 return 0;
1590}
1591#endif
1592
1593#if defined(CONFIG_PM_RUNTIME)
1594
1595static int tegra_spi_runtime_idle(struct device *dev)
1596{
1597 struct spi_master *master;
1598 struct spi_tegra_data *tspi;
1599 master = dev_get_drvdata(dev);
1600 tspi = spi_master_get_devdata(master);
1601
1602 clk_disable(tspi->clk);
1603 clk_disable(tspi->sclk);
1604 return 0;
1605}
1606
1607static int tegra_spi_runtime_resume(struct device *dev)
1608{
1609 struct spi_master *master;
1610 struct spi_tegra_data *tspi;
1611 master = dev_get_drvdata(dev);
1612 tspi = spi_master_get_devdata(master);
1613
1614 clk_enable(tspi->sclk);
1615 clk_enable(tspi->clk);
1616 return 0;
1617}
1618
1619static const struct dev_pm_ops tegra_spi_dev_pm_ops = {
1620 .runtime_idle = tegra_spi_runtime_idle,
1621 .runtime_resume = tegra_spi_runtime_resume,
1622};
1623
1624#endif
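
The runtime_idle/runtime_resume callbacks above gate the controller clocks, so register access must sit inside a pm_runtime_get_sync()/pm_runtime_put_sync() pair, as probe() does. An editorial sketch (assumes <linux/pm_runtime.h>):

static void demo_touch_registers(struct platform_device *pdev)
{
	pm_runtime_get_sync(&pdev->dev);	/* runtime_resume: clocks on */
	/* ... safe to read/write SLINK registers here ... */
	pm_runtime_put_sync(&pdev->dev);	/* may idle: clocks gated */
}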
1625
1626MODULE_ALIAS("platform:spi_tegra");
1627
1628#ifdef CONFIG_OF
1629static struct of_device_id spi_tegra_of_match_table[] __devinitdata = {
1630 { .compatible = "nvidia,tegra20-spi", },
1631 {}
1632};
1633MODULE_DEVICE_TABLE(of, spi_tegra_of_match_table);
1634#else /* CONFIG_OF */
1635#define spi_tegra_of_match_table NULL
1636#endif /* CONFIG_OF */
1637
1638static struct platform_driver spi_tegra_driver = {
1639 .driver = {
1640 .name = "spi_tegra",
1641 .owner = THIS_MODULE,
1642#if defined(CONFIG_PM_RUNTIME)
1643 .pm = &tegra_spi_dev_pm_ops,
1644#endif
1645 .of_match_table = spi_tegra_of_match_table,
1646 },
1647 .remove = __devexit_p(spi_tegra_remove),
1648#ifdef CONFIG_PM
1649 .suspend = spi_tegra_suspend,
1650 .resume = spi_tegra_resume,
1651#endif
1652};
1653
1654static int __init spi_tegra_init(void)
1655{
1656 return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe);
1657}
1658subsys_initcall(spi_tegra_init);
1659
1660static void __exit spi_tegra_exit(void)
1661{
1662 platform_driver_unregister(&spi_tegra_driver);
1663}
1664module_exit(spi_tegra_exit);
1665
1666MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_slave_tegra.c b/drivers/spi/spi_slave_tegra.c
new file mode 100644
index 00000000000..c17a8cb1ac1
--- /dev/null
+++ b/drivers/spi/spi_slave_tegra.c
@@ -0,0 +1,1401 @@
1/*
2 * Driver for Nvidia TEGRA spi controller in slave mode.
3 *
4 * Copyright (c) 2011, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21/*#define DEBUG 1*/
22/*#define VERBOSE_DEBUG 1*/
23
24#include <linux/kernel.h>
25#include <linux/init.h>
26#include <linux/err.h>
27#include <linux/platform_device.h>
28#include <linux/io.h>
29#include <linux/dma-mapping.h>
30#include <linux/dmapool.h>
31#include <linux/clk.h>
32#include <linux/interrupt.h>
33#include <linux/delay.h>
34#include <linux/completion.h>
35
36#include <linux/spi/spi.h>
37#include <linux/spi-tegra.h>
38
39#include <mach/dma.h>
40#include <mach/clk.h>
41#include <mach/spi.h>
42
43#define SLINK_COMMAND 0x000
44#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
45#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
46#define SLINK_BOTH_EN (1 << 10)
47#define SLINK_CS_SW (1 << 11)
48#define SLINK_CS_VALUE (1 << 12)
49#define SLINK_CS_POLARITY (1 << 13)
50#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
51#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
52#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
53#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
54#define SLINK_IDLE_SDA_MASK (3 << 16)
55#define SLINK_CS_POLARITY1 (1 << 20)
56#define SLINK_CK_SDA (1 << 21)
57#define SLINK_CS_POLARITY2 (1 << 22)
58#define SLINK_CS_POLARITY3 (1 << 23)
59#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
60#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
61#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
62#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
63#define SLINK_IDLE_SCLK_MASK (3 << 24)
64#define SLINK_M_S (1 << 28)
65#define SLINK_WAIT (1 << 29)
66#define SLINK_GO (1 << 30)
67#define SLINK_ENB (1 << 31)
68
69#define SLINK_COMMAND2 0x004
70#define SLINK_LSBFE (1 << 0)
71#define SLINK_SSOE (1 << 1)
72#define SLINK_SPIE (1 << 4)
73#define SLINK_BIDIROE (1 << 6)
74#define SLINK_MODFEN (1 << 7)
75#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
76#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
77#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
78#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
79#define SLINK_FIFO_REFILLS_0 (0 << 22)
80#define SLINK_FIFO_REFILLS_1 (1 << 22)
81#define SLINK_FIFO_REFILLS_2 (2 << 22)
82#define SLINK_FIFO_REFILLS_3 (3 << 22)
83#define SLINK_FIFO_REFILLS_MASK (3 << 22)
84#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
85#define SLINK_SPC0 (1 << 29)
86#define SLINK_TXEN (1 << 30)
87#define SLINK_RXEN (1 << 31)
88
89#define SLINK_STATUS 0x008
90#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
91#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
92#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
93#define SLINK_MODF (1 << 16)
94#define SLINK_RX_UNF (1 << 18)
95#define SLINK_TX_OVF (1 << 19)
96#define SLINK_TX_FULL (1 << 20)
97#define SLINK_TX_EMPTY (1 << 21)
98#define SLINK_RX_FULL (1 << 22)
99#define SLINK_RX_EMPTY (1 << 23)
100#define SLINK_TX_UNF (1 << 24)
101#define SLINK_RX_OVF (1 << 25)
102#define SLINK_TX_FLUSH (1 << 26)
103#define SLINK_RX_FLUSH (1 << 27)
104#define SLINK_SCLK (1 << 28)
105#define SLINK_ERR (1 << 29)
106#define SLINK_RDY (1 << 30)
107#define SLINK_BSY (1 << 31)
108
109#define SLINK_MAS_DATA 0x010
110#define SLINK_SLAVE_DATA 0x014
111
112#define SLINK_DMA_CTL 0x018
113#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
114#define SLINK_TX_TRIG_1 (0 << 16)
115#define SLINK_TX_TRIG_4 (1 << 16)
116#define SLINK_TX_TRIG_8 (2 << 16)
117#define SLINK_TX_TRIG_16 (3 << 16)
118#define SLINK_TX_TRIG_MASK (3 << 16)
119#define SLINK_RX_TRIG_1 (0 << 18)
120#define SLINK_RX_TRIG_4 (1 << 18)
121#define SLINK_RX_TRIG_8 (2 << 18)
122#define SLINK_RX_TRIG_16 (3 << 18)
123#define SLINK_RX_TRIG_MASK (3 << 18)
124#define SLINK_PACKED (1 << 20)
125#define SLINK_PACK_SIZE_4 (0 << 21)
126#define SLINK_PACK_SIZE_8 (1 << 21)
127#define SLINK_PACK_SIZE_16 (2 << 21)
128#define SLINK_PACK_SIZE_32 (3 << 21)
129#define SLINK_PACK_SIZE_MASK (3 << 21)
130#define SLINK_IE_TXC (1 << 26)
131#define SLINK_IE_RXC (1 << 27)
132#define SLINK_DMA_EN (1 << 31)
133
134#define SLINK_STATUS2 0x01c
135#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
136#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
137#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
138
139#define SLINK_TX_FIFO 0x100
140#define SLINK_RX_FIFO 0x180
141
142#define DATA_DIR_TX (1 << 0)
143#define DATA_DIR_RX (1 << 1)
144
145#define SPI_FIFO_DEPTH 32
146#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
147
148
149static const unsigned long spi_tegra_req_sels[] = {
150 TEGRA_DMA_REQ_SEL_SL2B1,
151 TEGRA_DMA_REQ_SEL_SL2B2,
152 TEGRA_DMA_REQ_SEL_SL2B3,
153 TEGRA_DMA_REQ_SEL_SL2B4,
154#ifndef CONFIG_ARCH_TEGRA_2x_SOC
155 TEGRA_DMA_REQ_SEL_SL2B5,
156 TEGRA_DMA_REQ_SEL_SL2B6,
157#endif
158
159};
160
161#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
162#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
163#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
164
165#define SLINK_STATUS2_RESET \
166 (TX_FIFO_EMPTY_COUNT_MAX | \
167 RX_FIFO_FULL_COUNT_ZERO << 16)
168
169#define MAX_CHIP_SELECT 4
170#define SLINK_FIFO_DEPTH 4
171
172struct spi_tegra_data {
173 struct spi_master *master;
174 struct platform_device *pdev;
175 spinlock_t lock;
176 char port_name[32];
177
178 struct clk *clk;
179 void __iomem *base;
180 unsigned long phys;
181 unsigned irq;
182
183 u32 cur_speed;
184
185 struct list_head queue;
186 struct spi_transfer *cur;
187 struct spi_device *cur_spi;
188 unsigned cur_pos;
189 unsigned cur_len;
190 unsigned words_per_32bit;
191 unsigned bytes_per_word;
192 unsigned curr_dma_words;
193
194 unsigned cur_direction;
195
196 bool is_dma_allowed;
197
198 struct tegra_dma_req rx_dma_req;
199 struct tegra_dma_channel *rx_dma;
200 u32 *rx_buf;
201 dma_addr_t rx_buf_phys;
202 unsigned cur_rx_pos;
203
204 struct tegra_dma_req tx_dma_req;
205 struct tegra_dma_channel *tx_dma;
206 u32 *tx_buf;
207 dma_addr_t tx_buf_phys;
208 unsigned cur_tx_pos;
209
210 unsigned dma_buf_size;
211 unsigned max_buf_size;
212 bool is_curr_dma_xfer;
213
214 bool is_clkon_always;
215 bool clk_state;
216 bool is_suspended;
217
218 bool is_hw_based_cs;
219
220 struct completion rx_dma_complete;
221 struct completion tx_dma_complete;
222
223 u32 rx_complete;
224 u32 tx_complete;
225 u32 tx_status;
226 u32 rx_status;
227 u32 status_reg;
228 bool is_packed;
229 unsigned long packed_size;
230
231 u32 command_reg;
232 u32 command2_reg;
233 u32 dma_control_reg;
234 u32 def_command_reg;
235 u32 def_command2_reg;
236
237 callback client_slave_ready_cb;
238 void *client_data;
239
240 struct spi_clk_parent *parent_clk_list;
241 int parent_clk_count;
242 unsigned long max_rate;
243 unsigned long max_parent_rate;
244 int min_div;
245};
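
The SLINK_* masks defined above are used as simple bit tests on a latched status word. An editorial sketch of decoding one (raw readl() here; the spi_tegra_readl() helper below adds a clock-state check):

static void demo_classify_status(struct spi_tegra_data *tspi)
{
	unsigned long status = readl(tspi->base + SLINK_STATUS);

	if (status & (SLINK_TX_OVF | SLINK_TX_UNF))
		pr_debug("tx fifo overflow/underflow\n");
	if (status & (SLINK_RX_OVF | SLINK_RX_UNF))
		pr_debug("rx fifo overflow/underflow\n");
	if (status & SLINK_BSY)
		pr_debug("transfer still in progress\n");
}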
246
247static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
248 unsigned long reg)
249{
250 if (!tspi->clk_state)
251 BUG();
252 return readl(tspi->base + reg);
253}
254
255static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
256 unsigned long val, unsigned long reg)
257{
258 if (!tspi->clk_state)
259 BUG();
260 writel(val, tspi->base + reg);
261}
262
263int spi_tegra_register_callback(struct spi_device *spi, callback func,
264 void *client_data)
265{
266 struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
267
268 if (!tspi || !func)
269 return -EINVAL;
270 tspi->client_slave_ready_cb = func;
271 tspi->client_data = client_data;
272 return 0;
273}
274EXPORT_SYMBOL_GPL(spi_tegra_register_callback);
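
A slave-side client registers this hook to learn when the controller has been armed for a transfer (it is invoked at the end of spi_tegra_start_transfer() below). Editorial sketch with hypothetical demo_* names; note the callback typedef from <linux/spi-tegra.h> is called as cb(client_data), taking only the opaque pointer:

static void demo_slave_ready(void *client_data)
{
	/* e.g. assert a "slave armed" GPIO that the master polls */
}

static int demo_client_setup(struct spi_device *spi)
{
	return spi_tegra_register_callback(spi, demo_slave_ready, NULL);
}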
275
276static void spi_tegra_clear_status(struct spi_tegra_data *tspi)
277{
278 unsigned long val;
279 unsigned long val_write = 0;
280
281 val = spi_tegra_readl(tspi, SLINK_STATUS);
282
283 val_write = SLINK_RDY;
284 if (val & SLINK_TX_OVF)
285 val_write |= SLINK_TX_OVF;
286 if (val & SLINK_RX_OVF)
287 val_write |= SLINK_RX_OVF;
288 if (val & SLINK_RX_UNF)
289 val_write |= SLINK_RX_UNF;
290 if (val & SLINK_TX_UNF)
291 val_write |= SLINK_TX_UNF;
292
293 spi_tegra_writel(tspi, val_write, SLINK_STATUS);
294}
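
The status bits behave as write-one-to-clear here: the helper always writes SLINK_RDY and echoes back whichever of the four error bits it found set, clearing them without disturbing anything else. Worked example (editorial):

/*
 * If SLINK_STATUS reads back with SLINK_RX_OVF set, the helper writes
 * SLINK_RDY | SLINK_RX_OVF; each 1 written clears the corresponding
 * latched bit, while bits written as 0 are left untouched.
 */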
295
296static unsigned long spi_tegra_get_packed_size(struct spi_tegra_data *tspi,
297 struct spi_transfer *t)
298{
299 unsigned long val;
300
301 switch (tspi->bytes_per_word) {
302 case 0:
303 val = SLINK_PACK_SIZE_4;
304 break;
305 case 1:
306 val = SLINK_PACK_SIZE_8;
307 break;
308 case 2:
309 val = SLINK_PACK_SIZE_16;
310 break;
311 case 4:
312 val = SLINK_PACK_SIZE_32;
313 break;
314 default:
315 val = 0;
316 }
317 return val;
318}
319
320static unsigned spi_tegra_calculate_curr_xfer_param(
321 struct spi_device *spi, struct spi_tegra_data *tspi,
322 struct spi_transfer *t)
323{
324 unsigned remain_len = t->len - tspi->cur_pos;
325 unsigned max_word;
326	unsigned bits_per_word;
327 unsigned max_len;
328 unsigned total_fifo_words;
329
330 bits_per_word = t->bits_per_word ? t->bits_per_word :
331 spi->bits_per_word;
332 tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
333
334 if (bits_per_word == 8 || bits_per_word == 16) {
335 tspi->is_packed = 1;
336 tspi->words_per_32bit = 32/bits_per_word;
337 } else {
338 tspi->is_packed = 0;
339 tspi->words_per_32bit = 1;
340 }
341 tspi->packed_size = spi_tegra_get_packed_size(tspi, t);
342
343 if (tspi->is_packed) {
344 max_len = min(remain_len, tspi->max_buf_size);
345 tspi->curr_dma_words = max_len/tspi->bytes_per_word;
346 total_fifo_words = remain_len/4;
347 } else {
348 max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
349 max_word = min(max_word, tspi->max_buf_size/4);
350 tspi->curr_dma_words = max_word;
351 total_fifo_words = remain_len/tspi->bytes_per_word;
352 }
353	/* The whole transfer must fit in a single shot */
354	if (tspi->curr_dma_words * tspi->bytes_per_word != t->len) {
355		dev_err(&tspi->pdev->dev, "The requested length cannot be"
356			" transferred in one shot\n");
357 BUG();
358 }
359 return total_fifo_words;
360}
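
A worked example of the sizing above (editorial): for t->len = 60 and bits_per_word = 8,

/*
 *   bytes_per_word   = (8 - 1) / 8 + 1            = 1
 *   is_packed        = 1, words_per_32bit = 32/8  = 4
 *   curr_dma_words   = min(60, max_buf_size) / 1  = 60
 *   total_fifo_words = 60 / 4                     = 15
 *
 * 15 <= SPI_FIFO_DEPTH (32), so spi_tegra_start_transfer() below picks
 * the CPU-based path; the one-shot check holds since 60 * 1 == t->len.
 */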
361
362static unsigned spi_tegra_fill_tx_fifo_from_client_txbuf(
363 struct spi_tegra_data *tspi, struct spi_transfer *t)
364{
365 unsigned nbytes;
366 unsigned tx_empty_count;
367 unsigned long fifo_status;
368 u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
369 unsigned max_n_32bit;
370 unsigned i, count;
371 unsigned long x;
372 unsigned int written_words;
373
374 fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
375 tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
376
377 if (tspi->is_packed) {
378 nbytes = tspi->curr_dma_words * tspi->bytes_per_word;
379 max_n_32bit = (min(nbytes, tx_empty_count*4) - 1)/4 + 1;
380 for (count = 0; count < max_n_32bit; ++count) {
381 x = 0;
382 for (i = 0; (i < 4) && nbytes; i++, nbytes--)
383 x |= (*tx_buf++) << (i*8);
384 spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
385 }
386 written_words = min(max_n_32bit * tspi->words_per_32bit,
387 tspi->curr_dma_words);
388 } else {
389 max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
390 nbytes = max_n_32bit * tspi->bytes_per_word;
391 for (count = 0; count < max_n_32bit; ++count) {
392 x = 0;
393 for (i = 0; nbytes && (i < tspi->bytes_per_word);
394 ++i, nbytes--)
395 x |= ((*tx_buf++) << i*8);
396 spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
397 }
398 written_words = max_n_32bit;
399 }
400 tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
401 return written_words;
402}
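
The inner loops above pack client bytes LSB-first into 32-bit FIFO words. In isolation the packed case reduces to this editorial sketch (hypothetical helper):

/* Pack up to 4 bytes, least-significant byte first, into one FIFO word */
static u32 demo_pack_word(const u8 *buf, unsigned int nbytes)
{
	u32 x = 0;
	unsigned int i;

	for (i = 0; i < 4 && i < nbytes; i++)
		x |= (u32)buf[i] << (i * 8);
	return x;
}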
403
404static unsigned int spi_tegra_read_rx_fifo_to_client_rxbuf(
405 struct spi_tegra_data *tspi, struct spi_transfer *t)
406{
407 unsigned rx_full_count;
408 unsigned long fifo_status;
409 u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
410 unsigned i, count;
411 unsigned long x;
412	unsigned int read_words = 0;
413 unsigned len;
414
415 fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
416 rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
417 dev_dbg(&tspi->pdev->dev, "Rx fifo count %d\n", rx_full_count);
418 if (tspi->is_packed) {
419 len = tspi->curr_dma_words * tspi->bytes_per_word;
420 for (count = 0; count < rx_full_count; ++count) {
421 x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
422 for (i = 0; len && (i < 4); ++i, len--)
423 *rx_buf++ = (x >> i*8) & 0xFF;
424 }
425 tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
426 read_words += tspi->curr_dma_words;
427 } else {
428 for (count = 0; count < rx_full_count; ++count) {
429 x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
430 for (i = 0; (i < tspi->bytes_per_word); ++i)
431 *rx_buf++ = (x >> (i*8)) & 0xFF;
432 }
433 tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
434 read_words += rx_full_count;
435 }
436 return read_words;
437}
438
439static void spi_tegra_copy_client_txbuf_to_spi_txbuf(
440 struct spi_tegra_data *tspi, struct spi_transfer *t)
441{
442 unsigned len;
443 if (tspi->is_packed) {
444 len = tspi->curr_dma_words * tspi->bytes_per_word;
445 memcpy(tspi->tx_buf, t->tx_buf + tspi->cur_pos, len);
446 } else {
447 unsigned int i;
448 unsigned int count;
449 u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
450 unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
451 unsigned int x;
452
453 for (count = 0; count < tspi->curr_dma_words; ++count) {
454 x = 0;
455 for (i = 0; consume && (i < tspi->bytes_per_word);
456 ++i, consume--)
457 x |= ((*tx_buf++) << i*8);
458 tspi->tx_buf[count] = x;
459 }
460 }
461 tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
462}
463
464static void spi_tegra_copy_spi_rxbuf_to_client_rxbuf(
465 struct spi_tegra_data *tspi, struct spi_transfer *t)
466{
467 unsigned len;
468 if (tspi->is_packed) {
469 len = tspi->curr_dma_words * tspi->bytes_per_word;
470 memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_buf, len);
471 } else {
472 unsigned int i;
473 unsigned int count;
474 unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
475 unsigned int x;
476 for (count = 0; count < tspi->curr_dma_words; ++count) {
477 x = tspi->rx_buf[count];
478 for (i = 0; (i < tspi->bytes_per_word); ++i)
479 *rx_buf++ = (x >> (i*8)) & 0xFF;
480 }
481 }
482 tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
483}
484
485static int spi_tegra_start_dma_based_transfer(
486 struct spi_tegra_data *tspi, struct spi_transfer *t)
487{
488 unsigned long val;
489 unsigned long test_val;
490 unsigned int len;
491 int ret = 0;
492
493 INIT_COMPLETION(tspi->rx_dma_complete);
494 INIT_COMPLETION(tspi->tx_dma_complete);
495
496 val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
497 val |= tspi->packed_size;
498 if (tspi->is_packed)
499 len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
500 4) * 4;
501 else
502 len = tspi->curr_dma_words * 4;
503
504 if (len & 0xF)
505 val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
506 else if (((len) >> 4) & 0x1)
507 val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
508 else
509 val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
510
511 if (tspi->cur_direction & DATA_DIR_TX)
512 val |= SLINK_IE_TXC;
513
514 if (tspi->cur_direction & DATA_DIR_RX)
515 val |= SLINK_IE_RXC;
516
517 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
518 tspi->dma_control_reg = val;
519
520 if (tspi->cur_direction & DATA_DIR_TX) {
521 spi_tegra_copy_client_txbuf_to_spi_txbuf(tspi, t);
522 wmb();
523 tspi->tx_dma_req.size = len;
524 ret = tegra_dma_enqueue_req(tspi->tx_dma, &tspi->tx_dma_req);
525 if (ret < 0) {
526			dev_err(&tspi->pdev->dev, "Error starting tx dma,"
527				" error = %d\n", ret);
528 return ret;
529 }
530
531		/* Wait for the TX FIFO to fill before starting the SLINK */
532 test_val = spi_tegra_readl(tspi, SLINK_STATUS);
533 while (!(test_val & SLINK_TX_FULL))
534 test_val = spi_tegra_readl(tspi, SLINK_STATUS);
535 }
536
537 if (tspi->cur_direction & DATA_DIR_RX) {
538 tspi->rx_dma_req.size = len;
539 ret = tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
540 if (ret < 0) {
541			dev_err(&tspi->pdev->dev, "Error starting rx dma,"
542				" error = %d\n", ret);
543 if (tspi->cur_direction & DATA_DIR_TX)
544 tegra_dma_dequeue_req(tspi->tx_dma,
545 &tspi->tx_dma_req);
546 return ret;
547 }
548 }
549 tspi->is_curr_dma_xfer = true;
550 if (tspi->is_packed) {
551 val |= SLINK_PACKED;
552 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
553 udelay(1);
554 wmb();
555 }
556
557 val |= SLINK_DMA_EN;
558 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
559 return ret;
560}
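
The trigger-level selection above keys off the low bits of len, which is in bytes: lengths that are not a multiple of 16 use single-word triggers, multiples of 16 but not of 32 use 4-word triggers, and multiples of 32 use 8-word triggers. Worked examples (editorial):

/*
 *   len = 12 : (12 & 0xF) != 0     -> SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1
 *   len = 16 : (16 >> 4) & 1 == 1  -> SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4
 *   len = 32 : multiple of 32      -> SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8
 */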
561
562static int spi_tegra_start_cpu_based_transfer(
563 struct spi_tegra_data *tspi, struct spi_transfer *t)
564{
565 unsigned long val;
566 unsigned curr_words;
567
568 val = tspi->packed_size;
569 if (tspi->cur_direction & DATA_DIR_TX)
570 val |= SLINK_IE_TXC;
571
572 if (tspi->cur_direction & DATA_DIR_RX)
573 val |= SLINK_IE_RXC;
574
575 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
576 tspi->dma_control_reg = val;
577
578 if (tspi->cur_direction & DATA_DIR_TX)
579 curr_words = spi_tegra_fill_tx_fifo_from_client_txbuf(tspi, t);
580 else
581 curr_words = tspi->curr_dma_words;
582 val |= SLINK_DMA_BLOCK_SIZE(curr_words - 1);
583 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
584 tspi->dma_control_reg = val;
585
586 tspi->is_curr_dma_xfer = false;
587 if (tspi->is_packed) {
588 val |= SLINK_PACKED;
589 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
590 udelay(1);
591 wmb();
592 }
593 val |= SLINK_DMA_EN;
594 spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
595 return 0;
596}
597
598static void set_best_clk_source(struct spi_tegra_data *tspi,
599 unsigned long speed)
600{
601 long new_rate;
602 unsigned long err_rate;
603 int rate = speed * 4;
604 unsigned int fin_err = speed * 4;
605 int final_index = -1;
606 int count;
607 int ret;
608 struct clk *pclk;
609 unsigned long prate, crate, nrate;
610 unsigned long cdiv;
611
612 if (!tspi->parent_clk_count || !tspi->parent_clk_list)
613 return;
614
615 /* make sure divisor is more than min_div */
616 pclk = clk_get_parent(tspi->clk);
617 prate = clk_get_rate(pclk);
618 crate = clk_get_rate(tspi->clk);
619 cdiv = DIV_ROUND_UP(prate, crate);
620 if (cdiv < tspi->min_div) {
621 nrate = DIV_ROUND_UP(prate, tspi->min_div);
622 clk_set_rate(tspi->clk, nrate);
623 }
624
625 for (count = 0; count < tspi->parent_clk_count; ++count) {
626 if (!tspi->parent_clk_list[count].parent_clk)
627 continue;
628 ret = clk_set_parent(tspi->clk,
629 tspi->parent_clk_list[count].parent_clk);
630 if (ret < 0) {
631			dev_warn(&tspi->pdev->dev, "Error setting parent"
632				" clk src %s\n",
633 tspi->parent_clk_list[count].name);
634 continue;
635 }
636
637 new_rate = clk_round_rate(tspi->clk, rate);
638 if (new_rate < 0)
639 continue;
640
641 err_rate = abs(new_rate - rate);
642 if (err_rate < fin_err) {
643 final_index = count;
644 fin_err = err_rate;
645 }
646 }
647
648 if (final_index >= 0) {
649 dev_info(&tspi->pdev->dev, "Setting clk_src %s\n",
650 tspi->parent_clk_list[final_index].name);
651 clk_set_parent(tspi->clk,
652 tspi->parent_clk_list[final_index].parent_clk);
653 }
654}
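
The loop above asks each candidate parent, via clk_round_rate(), how close it can get to the target module rate (four times the requested SCLK, matching the clk_set_rate(tspi->clk, speed * 4) in spi_tegra_start_transfer() below) and keeps the parent with the smallest absolute error. Worked example (editorial, made-up rates):

/*
 * speed = 12 MHz -> target rate = 48 MHz.  If parent A rounds to
 * 48.0 MHz (error 0) and parent B to 51.2 MHz (error 3.2 MHz), the
 * loop keeps A and reparents tspi->clk to it.
 */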
655
656static void spi_tegra_start_transfer(struct spi_device *spi,
657 struct spi_transfer *t)
658{
659 struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
660 u32 speed;
661 u8 bits_per_word;
662 unsigned total_fifo_words;
663 int ret;
664 unsigned long command;
665 unsigned long command2;
666
667 bits_per_word = t->bits_per_word ? t->bits_per_word :
668 spi->bits_per_word;
669
670 speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
671 if (speed != tspi->cur_speed) {
672 set_best_clk_source(tspi, speed);
673 clk_set_rate(tspi->clk, speed * 4);
674 tspi->cur_speed = speed;
675 }
676
677 tspi->cur = t;
678 tspi->cur_spi = spi;
679 tspi->cur_pos = 0;
680 tspi->cur_rx_pos = 0;
681 tspi->cur_tx_pos = 0;
682 tspi->rx_complete = 0;
683 tspi->tx_complete = 0;
684 total_fifo_words = spi_tegra_calculate_curr_xfer_param(spi, tspi, t);
685
686 command2 = tspi->def_command2_reg;
687 if (!tspi->is_clkon_always) {
688 if (!tspi->clk_state) {
689 clk_enable(tspi->clk);
690 tspi->clk_state = 1;
691 }
692 }
693
694 spi_tegra_clear_status(tspi);
695
696 command = tspi->def_command_reg;
697 command |= SLINK_BIT_LENGTH(bits_per_word - 1);
698
699 command |= SLINK_CS_SW;
700
701 command &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA;
702 if (spi->mode & SPI_CPHA)
703 command |= SLINK_CK_SDA;
704
705 if (spi->mode & SPI_CPOL)
706 command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
707 else
708 command |= SLINK_IDLE_SCLK_DRIVE_LOW;
709
710 spi_tegra_writel(tspi, command, SLINK_COMMAND);
711 tspi->command_reg = command;
712
713	dev_dbg(&tspi->pdev->dev, "default command 0x%x, written 0x%lx\n",
714 tspi->def_command_reg, command);
715
716 command2 &= ~(SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN);
717 tspi->cur_direction = 0;
718 if (t->rx_buf) {
719 command2 |= SLINK_RXEN;
720 tspi->cur_direction |= DATA_DIR_RX;
721 }
722 if (t->tx_buf) {
723 command2 |= SLINK_TXEN;
724 tspi->cur_direction |= DATA_DIR_TX;
725 }
726 command2 |= SLINK_SS_EN_CS(spi->chip_select);
727 spi_tegra_writel(tspi, command2, SLINK_COMMAND2);
728 tspi->command2_reg = command2;
729
730 if (total_fifo_words > SPI_FIFO_DEPTH)
731 ret = spi_tegra_start_dma_based_transfer(tspi, t);
732 else
733 ret = spi_tegra_start_cpu_based_transfer(tspi, t);
734 WARN_ON(ret < 0);
735
736 if (tspi->client_slave_ready_cb)
737 tspi->client_slave_ready_cb(tspi->client_data);
738}
739
740static void spi_tegra_start_message(struct spi_device *spi,
741 struct spi_message *m)
742{
743 struct spi_transfer *t;
744 m->actual_length = 0;
745 m->status = 0;
746 t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
747 spi_tegra_start_transfer(spi, t);
748}
749
750static int spi_tegra_setup(struct spi_device *spi)
751{
752 struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
753 unsigned long cs_bit;
754 unsigned long val;
755 unsigned long flags;
756
757 dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
758 spi->bits_per_word,
759 spi->mode & SPI_CPOL ? "" : "~",
760 spi->mode & SPI_CPHA ? "" : "~",
761 spi->max_speed_hz);
762
763 BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
764 switch (spi->chip_select) {
765 case 0:
766 cs_bit = SLINK_CS_POLARITY;
767 break;
768
769 case 1:
770 cs_bit = SLINK_CS_POLARITY1;
771 break;
772
773 case 2:
774 cs_bit = SLINK_CS_POLARITY2;
775 break;
776
777 case 3:
778 cs_bit = SLINK_CS_POLARITY3;
779 break;
780
781 default:
782 return -EINVAL;
783 }
784
785 spin_lock_irqsave(&tspi->lock, flags);
786 val = tspi->def_command_reg;
787 if (spi->mode & SPI_CS_HIGH)
788 val |= cs_bit;
789 else
790 val &= ~cs_bit;
791 tspi->def_command_reg = val;
792
793 if (!tspi->is_clkon_always && !tspi->clk_state) {
794 clk_enable(tspi->clk);
795 tspi->clk_state = 1;
796 }
797 spi_tegra_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
798 if (!tspi->is_clkon_always && tspi->clk_state) {
799 clk_disable(tspi->clk);
800 tspi->clk_state = 0;
801 }
802
803 spin_unlock_irqrestore(&tspi->lock, flags);
804 return 0;
805}
806
807static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
808{
809 struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
810 struct spi_transfer *t;
811 unsigned long flags;
812 int was_empty;
813 int bytes_per_word;
814 u8 bits_per_word;
815 int fifo_word;
816
817 /* Support only one transfer per message */
818 if (!list_is_singular(&m->transfers))
819 return -EINVAL;
820
821 if (list_empty(&m->transfers) || !m->complete)
822 return -EINVAL;
823
824 t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
825 if (t->bits_per_word < 0 || t->bits_per_word > 32)
826 return -EINVAL;
827
828 if (t->len == 0)
829 return -EINVAL;
830
831 bits_per_word = (t->bits_per_word) ? : spi->bits_per_word;
832
833	/* Check that the length is a whole number of words */
834 bytes_per_word = (bits_per_word + 7)/8;
835
836 if (t->len % bytes_per_word != 0)
837 return -EINVAL;
838
839 if (!t->rx_buf && !t->tx_buf)
840 return -EINVAL;
841
842 if ((bits_per_word == 8) || (bits_per_word == 16))
843 fifo_word = t->len/4;
844 else
845 fifo_word = t->len/bytes_per_word;
846 if (fifo_word >= tspi->max_buf_size/4)
847 return -EINVAL;
848
849 spin_lock_irqsave(&tspi->lock, flags);
850
851 if (WARN_ON(tspi->is_suspended)) {
852 spin_unlock_irqrestore(&tspi->lock, flags);
853 return -EBUSY;
854 }
855
856 m->state = spi;
857
858 was_empty = list_empty(&tspi->queue);
859 list_add_tail(&m->queue, &tspi->queue);
860
861 if (was_empty)
862 spi_tegra_start_message(spi, m);
863
864 spin_unlock_irqrestore(&tspi->lock, flags);
865
866 return 0;
867}
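
Per the checks above, a caller must queue a message with exactly one transfer, a non-NULL completion, and a length that is a whole number of words and fits the bounce buffer. A minimal editorial sketch of such a client (hypothetical demo_* names; assumes one message in flight at a time):

static int demo_queue_xfer(struct spi_device *spi, const void *tx,
			   void *rx, unsigned int len,
			   void (*done)(void *context), void *ctx)
{
	static struct spi_transfer t;	/* hypothetical: single user */
	static struct spi_message m;

	memset(&t, 0, sizeof(t));
	t.tx_buf = tx;
	t.rx_buf = rx;
	t.len = len;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	m.complete = done;
	m.context = ctx;
	return spi_async(spi, &m);	/* ends up in spi_tegra_transfer() */
}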
868
869static void spi_tegra_curr_transfer_complete(struct spi_tegra_data *tspi,
870 unsigned err, unsigned cur_xfer_size)
871{
872 struct spi_message *m;
873 struct spi_device *spi;
874
875 m = list_first_entry(&tspi->queue, struct spi_message, queue);
876 if (err)
877 m->status = -EIO;
878 spi = m->state;
879
880 m->actual_length += cur_xfer_size;
881 list_del(&m->queue);
882 m->complete(m->context);
883 if (!list_empty(&tspi->queue)) {
884 m = list_first_entry(&tspi->queue, struct spi_message, queue);
885 spi = m->state;
886 spi_tegra_start_message(spi, m);
887 } else {
888 spi_tegra_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
889 spi_tegra_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
890 if (!tspi->is_clkon_always) {
891 if (tspi->clk_state) {
892				/* Provide a delay to stabilize the signal
893				   state */
894 udelay(10);
895 clk_disable(tspi->clk);
896 tspi->clk_state = 0;
897 }
898 }
899 }
900}
901
902static void tegra_spi_tx_dma_complete(struct tegra_dma_req *req)
903{
904 struct spi_tegra_data *tspi = req->dev;
905 complete(&tspi->tx_dma_complete);
906}
907
908static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
909{
910 struct spi_tegra_data *tspi = req->dev;
911 complete(&tspi->rx_dma_complete);
912}
913
914static void handle_cpu_based_xfer(void *context_data)
915{
916 struct spi_tegra_data *tspi = context_data;
917 struct spi_transfer *t = tspi->cur;
918 unsigned long flags;
919
920 spin_lock_irqsave(&tspi->lock, flags);
921 if (tspi->tx_status || tspi->rx_status ||
922 (tspi->status_reg & SLINK_BSY)) {
923 dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
924 __func__, tspi->status_reg);
925 tegra_periph_reset_assert(tspi->clk);
926 udelay(2);
927 tegra_periph_reset_deassert(tspi->clk);
928 WARN_ON(1);
929 spi_tegra_curr_transfer_complete(tspi,
930 tspi->tx_status || tspi->rx_status, t->len);
931 goto exit;
932 }
933
934 dev_vdbg(&tspi->pdev->dev, " Current direction %x\n",
935 tspi->cur_direction);
936 if (tspi->cur_direction & DATA_DIR_RX)
937 spi_tegra_read_rx_fifo_to_client_rxbuf(tspi, t);
938
939 if (tspi->cur_direction & DATA_DIR_TX)
940 tspi->cur_pos = tspi->cur_tx_pos;
941 else if (tspi->cur_direction & DATA_DIR_RX)
942 tspi->cur_pos = tspi->cur_rx_pos;
943 else
944 WARN_ON(1);
945
946 dev_vdbg(&tspi->pdev->dev, "current position %d and length of the "
947 "transfer %d\n", tspi->cur_pos, t->len);
948 if (tspi->cur_pos == t->len) {
949 spi_tegra_curr_transfer_complete(tspi,
950 tspi->tx_status || tspi->rx_status, t->len);
951 goto exit;
952 }
953
954	/* There should be no remaining transfer */
955 BUG();
956exit:
957 spin_unlock_irqrestore(&tspi->lock, flags);
958 return;
959}
960
961static irqreturn_t spi_tegra_isr_thread(int irq, void *context_data)
962{
963 struct spi_tegra_data *tspi = context_data;
964 struct spi_transfer *t = tspi->cur;
965 long wait_status;
966 int err = 0;
967 unsigned long flags;
968
969 if (!tspi->is_curr_dma_xfer) {
970 handle_cpu_based_xfer(context_data);
971 return IRQ_HANDLED;
972 }
973
974	/* Abort DMAs on any error */
975 if (tspi->cur_direction & DATA_DIR_TX) {
976 if (tspi->tx_status) {
977 tegra_dma_dequeue_req(tspi->tx_dma, &tspi->tx_dma_req);
978 err += 1;
979 } else {
980 wait_status = wait_for_completion_interruptible_timeout(
981 &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
982 if (wait_status <= 0) {
983 tegra_dma_dequeue_req(tspi->tx_dma,
984 &tspi->tx_dma_req);
985				dev_err(&tspi->pdev->dev, "Error in DMA TX "
986					"transfer\n");
987 err += 1;
988 }
989 }
990 }
991
992 if (tspi->cur_direction & DATA_DIR_RX) {
993 if (tspi->rx_status) {
994 tegra_dma_dequeue_req(tspi->rx_dma, &tspi->rx_dma_req);
995 err += 2;
996 } else {
997 wait_status = wait_for_completion_interruptible_timeout(
998 &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
999 if (wait_status <= 0) {
1000 tegra_dma_dequeue_req(tspi->rx_dma,
1001 &tspi->rx_dma_req);
1002				dev_err(&tspi->pdev->dev, "Error in DMA RX "
1003					"transfer\n");
1004 err += 2;
1005 }
1006 }
1007 }
1008
1009 spin_lock_irqsave(&tspi->lock, flags);
1010 if (err) {
1011 dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
1012 __func__, tspi->status_reg);
1013 tegra_periph_reset_assert(tspi->clk);
1014 udelay(2);
1015 tegra_periph_reset_deassert(tspi->clk);
1016 WARN_ON(1);
1017 spi_tegra_curr_transfer_complete(tspi, err, t->len);
1018 spin_unlock_irqrestore(&tspi->lock, flags);
1019 return IRQ_HANDLED;
1020 }
1021
1022 if (tspi->cur_direction & DATA_DIR_RX)
1023 spi_tegra_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
1024
1025 if (tspi->cur_direction & DATA_DIR_TX)
1026 tspi->cur_pos = tspi->cur_tx_pos;
1027 else if (tspi->cur_direction & DATA_DIR_RX)
1028 tspi->cur_pos = tspi->cur_rx_pos;
1029 else
1030 WARN_ON(1);
1031
1032 if (tspi->cur_pos == t->len) {
1033 spi_tegra_curr_transfer_complete(tspi,
1034 tspi->tx_status || tspi->rx_status, t->len);
1035 spin_unlock_irqrestore(&tspi->lock, flags);
1036 return IRQ_HANDLED;
1037 }
1038
1039 spin_unlock_irqrestore(&tspi->lock, flags);
1040
1041	/* There should be no remaining transfer */
1042 BUG();
1043 return IRQ_HANDLED;
1044}
1045
1046static irqreturn_t spi_tegra_isr(int irq, void *context_data)
1047{
1048 struct spi_tegra_data *tspi = context_data;
1049
1050 tspi->status_reg = spi_tegra_readl(tspi, SLINK_STATUS);
1051 if (tspi->cur_direction & DATA_DIR_TX)
1052 tspi->tx_status = tspi->status_reg &
1053 (SLINK_TX_OVF | SLINK_TX_UNF);
1054
1055 if (tspi->cur_direction & DATA_DIR_RX)
1056 tspi->rx_status = tspi->status_reg &
1057 (SLINK_RX_OVF | SLINK_RX_UNF);
1058 spi_tegra_clear_status(tspi);
1059
1060
1061 return IRQ_WAKE_THREAD;
1062}
1063
1064static int __init spi_tegra_probe(struct platform_device *pdev)
1065{
1066 struct spi_master *master;
1067 struct spi_tegra_data *tspi;
1068 struct resource *r;
1069 struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
1070 int ret;
1071 int i;
1072
1073 master = spi_alloc_master(&pdev->dev, sizeof *tspi);
1074 if (master == NULL) {
1075 dev_err(&pdev->dev, "master allocation failed\n");
1076 return -ENOMEM;
1077 }
1078
1079 /* the spi->mode bits understood by this driver: */
1080 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1081
1082 if (pdev->id != -1)
1083 master->bus_num = pdev->id;
1084
1085 master->setup = spi_tegra_setup;
1086 master->transfer = spi_tegra_transfer;
1087 master->num_chipselect = MAX_CHIP_SELECT;
1088
1089 dev_set_drvdata(&pdev->dev, master);
1090 tspi = spi_master_get_devdata(master);
1091 tspi->master = master;
1092 tspi->pdev = pdev;
1093 spin_lock_init(&tspi->lock);
1094
1095 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1096 if (r == NULL) {
1097 ret = -ENODEV;
1098 goto fail_no_mem;
1099 }
1100
1101 if (!request_mem_region(r->start, (r->end - r->start) + 1,
1102 dev_name(&pdev->dev))) {
1103 ret = -EBUSY;
1104 goto fail_no_mem;
1105 }
1106
1107 tspi->phys = r->start;
1108 tspi->base = ioremap(r->start, r->end - r->start + 1);
1109 if (!tspi->base) {
1110 dev_err(&pdev->dev, "can't ioremap iomem\n");
1111 ret = -ENOMEM;
1112 goto fail_io_map;
1113 }
1114
1115 tspi->irq = platform_get_irq(pdev, 0);
1116 if (unlikely(tspi->irq < 0)) {
1117 dev_err(&pdev->dev, "can't find irq resource\n");
1118 ret = -ENXIO;
1119 goto fail_irq_req;
1120 }
1121
1122 sprintf(tspi->port_name, "tegra_spi_%d", pdev->id);
1123 ret = request_threaded_irq(tspi->irq, spi_tegra_isr,
1124 spi_tegra_isr_thread, IRQF_DISABLED,
1125 tspi->port_name, tspi);
1126 if (ret < 0) {
1127 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
1128 tspi->irq);
1129 goto fail_irq_req;
1130 }
1131
1132 tspi->clk = clk_get(&pdev->dev, NULL);
1133 if (IS_ERR_OR_NULL(tspi->clk)) {
1134		dev_err(&pdev->dev, "cannot get clock\n");
1135 ret = PTR_ERR(tspi->clk);
1136 goto fail_clk_get;
1137 }
1138
1139 INIT_LIST_HEAD(&tspi->queue);
1140
1141 if (pdata) {
1142 tspi->is_clkon_always = pdata->is_clkon_always;
1143 tspi->is_dma_allowed = pdata->is_dma_based;
1144 tspi->dma_buf_size = (pdata->max_dma_buffer) ?
1145 pdata->max_dma_buffer : DEFAULT_SPI_DMA_BUF_LEN;
1146 tspi->parent_clk_count = pdata->parent_clk_count;
1147 tspi->parent_clk_list = pdata->parent_clk_list;
1148 tspi->max_rate = pdata->max_rate;
1149 } else {
1150 tspi->is_clkon_always = false;
1151 tspi->is_dma_allowed = true;
1152 tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
1153 tspi->parent_clk_count = 0;
1154 tspi->parent_clk_list = NULL;
1155 tspi->max_rate = 0;
1156 }
1157
1158 tspi->max_parent_rate = 0;
1159 tspi->min_div = 0;
1160
1161 if (tspi->parent_clk_count) {
1162 tspi->max_parent_rate = tspi->parent_clk_list[0].fixed_clk_rate;
1163 for (i = 1; i < tspi->parent_clk_count; ++i) {
1164 tspi->max_parent_rate = max(tspi->max_parent_rate,
1165 tspi->parent_clk_list[i].fixed_clk_rate);
1166 }
1167 if (tspi->max_rate)
1168 tspi->min_div = DIV_ROUND_UP(tspi->max_parent_rate,
1169 tspi->max_rate);
1170 }
1171 tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
1172
1173 if (!tspi->is_dma_allowed)
1174 goto skip_dma_alloc;
1175
1176 init_completion(&tspi->tx_dma_complete);
1177 init_completion(&tspi->rx_dma_complete);
1178
1179
1180 tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
1181 "spi_rx_%d", pdev->id);
1182 if (!tspi->rx_dma) {
1183		dev_err(&pdev->dev, "cannot allocate rx dma channel\n");
1184 ret = -ENODEV;
1185 goto fail_rx_dma_alloc;
1186 }
1187
1188 tspi->rx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
1189 &tspi->rx_buf_phys, GFP_KERNEL);
1190 if (!tspi->rx_buf) {
1191		dev_err(&pdev->dev, "cannot allocate rx bounce buffer\n");
1192 ret = -ENOMEM;
1193 goto fail_rx_buf_alloc;
1194 }
1195
1196 memset(&tspi->rx_dma_req, 0, sizeof(struct tegra_dma_req));
1197 tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
1198 tspi->rx_dma_req.to_memory = 1;
1199 tspi->rx_dma_req.dest_addr = tspi->rx_buf_phys;
1200 tspi->rx_dma_req.virt_addr = tspi->rx_buf;
1201 tspi->rx_dma_req.dest_bus_width = 32;
1202 tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO;
1203 tspi->rx_dma_req.source_bus_width = 32;
1204 tspi->rx_dma_req.source_wrap = 4;
1205 tspi->rx_dma_req.dest_wrap = 0;
1206 tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
1207 tspi->rx_dma_req.dev = tspi;
1208
1209 tspi->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
1210 "spi_tx_%d", pdev->id);
1211 if (!tspi->tx_dma) {
1212		dev_err(&pdev->dev, "cannot allocate tx dma channel\n");
1213 ret = -ENODEV;
1214 goto fail_tx_dma_alloc;
1215 }
1216
1217 tspi->tx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
1218 &tspi->tx_buf_phys, GFP_KERNEL);
1219 if (!tspi->tx_buf) {
1220		dev_err(&pdev->dev, "cannot allocate tx bounce buffer\n");
1221 ret = -ENOMEM;
1222 goto fail_tx_buf_alloc;
1223 }
1224
1225 memset(&tspi->tx_dma_req, 0, sizeof(struct tegra_dma_req));
1226 tspi->tx_dma_req.complete = tegra_spi_tx_dma_complete;
1227 tspi->tx_dma_req.to_memory = 0;
1228 tspi->tx_dma_req.dest_addr = tspi->phys + SLINK_TX_FIFO;
1229 tspi->tx_dma_req.virt_addr = tspi->tx_buf;
1230 tspi->tx_dma_req.dest_bus_width = 32;
1231 tspi->tx_dma_req.dest_wrap = 4;
1232 tspi->tx_dma_req.source_wrap = 0;
1233 tspi->tx_dma_req.source_addr = tspi->tx_buf_phys;
1234 tspi->tx_dma_req.source_bus_width = 32;
1235 tspi->tx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
1236 tspi->tx_dma_req.dev = tspi;
1237 tspi->max_buf_size = tspi->dma_buf_size;
1238 tspi->def_command_reg = SLINK_CS_SW;
1239 tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
1240
1241skip_dma_alloc:
1242 clk_enable(tspi->clk);
1243 tspi->clk_state = 1;
1244 spi_tegra_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
1245 ret = spi_register_master(master);
1246 if (!tspi->is_clkon_always) {
1247 if (tspi->clk_state) {
1248 clk_disable(tspi->clk);
1249 tspi->clk_state = 0;
1250 }
1251 }
1252
1253 if (ret < 0) {
1254		dev_err(&pdev->dev, "cannot register SPI master, err %d\n", ret);
1255 goto fail_master_register;
1256 }
1257 return ret;
1258
1259fail_master_register:
1260 if (tspi->tx_buf)
1261 dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
1262 tspi->tx_buf, tspi->tx_buf_phys);
1263fail_tx_buf_alloc:
1264 if (tspi->tx_dma)
1265 tegra_dma_free_channel(tspi->tx_dma);
1266fail_tx_dma_alloc:
1267 if (tspi->rx_buf)
1268 dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
1269 tspi->rx_buf, tspi->rx_buf_phys);
1270fail_rx_buf_alloc:
1271 if (tspi->rx_dma)
1272 tegra_dma_free_channel(tspi->rx_dma);
1273fail_rx_dma_alloc:
1274 clk_put(tspi->clk);
1275fail_clk_get:
1276 free_irq(tspi->irq, tspi);
1277fail_irq_req:
1278 iounmap(tspi->base);
1279fail_io_map:
1280 release_mem_region(r->start, (r->end - r->start) + 1);
1281fail_no_mem:
1282 spi_master_put(master);
1283 return ret;
1284}
1285
1286static int __devexit spi_tegra_remove(struct platform_device *pdev)
1287{
1288 struct spi_master *master;
1289 struct spi_tegra_data *tspi;
1290 struct resource *r;
1291
1292 master = dev_get_drvdata(&pdev->dev);
1293 tspi = spi_master_get_devdata(master);
1294
1295 if (tspi->tx_buf)
1296 dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
1297 tspi->tx_buf, tspi->tx_buf_phys);
1298 if (tspi->tx_dma)
1299 tegra_dma_free_channel(tspi->tx_dma);
1300 if (tspi->rx_buf)
1301 dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
1302 tspi->rx_buf, tspi->rx_buf_phys);
1303 if (tspi->rx_dma)
1304 tegra_dma_free_channel(tspi->rx_dma);
1305
1306 if (tspi->is_clkon_always) {
1307 clk_disable(tspi->clk);
1308 tspi->clk_state = 0;
1309 }
1310
1311 clk_put(tspi->clk);
1312 iounmap(tspi->base);
1313
1314 spi_master_put(master);
1315 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1316 release_mem_region(r->start, (r->end - r->start) + 1);
1317
1318 return 0;
1319}
1320
1321#ifdef CONFIG_PM
1322static int spi_tegra_suspend(struct platform_device *pdev, pm_message_t state)
1323{
1324 struct spi_master *master;
1325 struct spi_tegra_data *tspi;
1326 unsigned long flags;
1327 unsigned limit = 50;
1328
1329 master = dev_get_drvdata(&pdev->dev);
1330 tspi = spi_master_get_devdata(master);
1331 spin_lock_irqsave(&tspi->lock, flags);
1332 tspi->is_suspended = true;
1333
1334 WARN_ON(!list_empty(&tspi->queue));
1335
1336 while (!list_empty(&tspi->queue) && limit--) {
1337 spin_unlock_irqrestore(&tspi->lock, flags);
1338 msleep(20);
1339 spin_lock_irqsave(&tspi->lock, flags);
1340 }
1341
1342 spin_unlock_irqrestore(&tspi->lock, flags);
1343 if (tspi->is_clkon_always) {
1344 clk_disable(tspi->clk);
1345 tspi->clk_state = 0;
1346 }
1347 return 0;
1348}
1349
1350static int spi_tegra_resume(struct platform_device *pdev)
1351{
1352 struct spi_master *master;
1353 struct spi_tegra_data *tspi;
1354 unsigned long flags;
1355
1356 master = dev_get_drvdata(&pdev->dev);
1357 tspi = spi_master_get_devdata(master);
1358
1359 spin_lock_irqsave(&tspi->lock, flags);
1360 clk_enable(tspi->clk);
1361 tspi->clk_state = 1;
1362 spi_tegra_writel(tspi, tspi->command_reg, SLINK_COMMAND);
1363 if (!tspi->is_clkon_always) {
1364 clk_disable(tspi->clk);
1365 tspi->clk_state = 0;
1366 }
1367
1368 tspi->cur_speed = 0;
1369 tspi->is_suspended = false;
1370 spin_unlock_irqrestore(&tspi->lock, flags);
1371 return 0;
1372}
1373#endif
1374
1375MODULE_ALIAS("platform:spi_slave_tegra");
1376
1377static struct platform_driver spi_tegra_driver = {
1378 .driver = {
1379 .name = "spi_slave_tegra",
1380 .owner = THIS_MODULE,
1381 },
1382 .remove = __devexit_p(spi_tegra_remove),
1383#ifdef CONFIG_PM
1384 .suspend = spi_tegra_suspend,
1385 .resume = spi_tegra_resume,
1386#endif
1387};
1388
1389static int __init spi_tegra_init(void)
1390{
1391 return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe);
1392}
1393subsys_initcall(spi_tegra_init);
1394
1395static void __exit spi_tegra_exit(void)
1396{
1397 platform_driver_unregister(&spi_tegra_driver);
1398}
1399module_exit(spi_tegra_exit);
1400
1401MODULE_LICENSE("GPL");