Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig          1
-rw-r--r--  drivers/spi/spi-davinci.c    292
-rw-r--r--  drivers/spi/spi-mxs.c        3
-rw-r--r--  drivers/spi/spi-pl022.c      3
-rw-r--r--  drivers/spi/spi-rspi.c       56
-rw-r--r--  drivers/spi/spi-s3c64xx.c    6
6 files changed, 170 insertions, 191 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8c2ff2490d99..1acae359cabe 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -134,6 +134,7 @@ config SPI_DAVINCI
 	tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
 	depends on ARCH_DAVINCI
 	select SPI_BITBANG
+	select TI_EDMA
 	help
 	  SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
 
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 3afe2f4f5b8e..147dfa87a64b 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -25,13 +25,14 @@
 #include <linux/platform_device.h>
 #include <linux/err.h>
 #include <linux/clk.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/edma.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi_bitbang.h>
 #include <linux/slab.h>
 
 #include <linux/platform_data/spi-davinci.h>
-#include <mach/edma.h>
 
 #define SPI_NO_RESOURCE		((resource_size_t)-1)
 
@@ -113,14 +114,6 @@
 #define SPIDEF		0x4c
 #define SPIFMT0		0x50
 
-/* We have 2 DMA channels per CS, one for RX and one for TX */
-struct davinci_spi_dma {
-	int			tx_channel;
-	int			rx_channel;
-	int			dummy_param_slot;
-	enum dma_event_q	eventq;
-};
-
 /* SPI Controller driver's private data. */
 struct davinci_spi {
 	struct spi_bitbang	bitbang;
@@ -134,11 +127,14 @@ struct davinci_spi {
 
 	const void		*tx;
 	void			*rx;
-#define SPI_TMP_BUFSZ	(SMP_CACHE_BYTES + 1)
-	u8			rx_tmp_buf[SPI_TMP_BUFSZ];
 	int			rcount;
 	int			wcount;
-	struct davinci_spi_dma	dma;
+
+	struct dma_chan		*dma_rx;
+	struct dma_chan		*dma_tx;
+	int			dma_rx_chnum;
+	int			dma_tx_chnum;
+
 	struct davinci_spi_platform_data *pdata;
 
 	void (*get_rx)(u32 rx_data, struct davinci_spi *);
@@ -496,21 +492,23 @@ out:
 	return errors;
 }
 
-static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
-{
-	struct davinci_spi *dspi = data;
-	struct davinci_spi_dma *dma = &dspi->dma;
-
-	edma_stop(lch);
-
-	if (status == DMA_COMPLETE) {
-		if (lch == dma->rx_channel)
-			dspi->rcount = 0;
-		if (lch == dma->tx_channel)
-			dspi->wcount = 0;
-	}
-
-	if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE))
+static void davinci_spi_dma_rx_callback(void *data)
+{
+	struct davinci_spi *dspi = (struct davinci_spi *)data;
+
+	dspi->rcount = 0;
+
+	if (!dspi->wcount && !dspi->rcount)
+		complete(&dspi->done);
+}
+
+static void davinci_spi_dma_tx_callback(void *data)
+{
+	struct davinci_spi *dspi = (struct davinci_spi *)data;
+
+	dspi->wcount = 0;
+
+	if (!dspi->wcount && !dspi->rcount)
 		complete(&dspi->done);
 }
 
@@ -526,20 +524,20 @@ static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
 static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 {
 	struct davinci_spi *dspi;
-	int data_type, ret;
+	int data_type, ret = -ENOMEM;
 	u32 tx_data, spidat1;
 	u32 errors = 0;
 	struct davinci_spi_config *spicfg;
 	struct davinci_spi_platform_data *pdata;
 	unsigned uninitialized_var(rx_buf_count);
-	struct device *sdev;
+	void *dummy_buf = NULL;
+	struct scatterlist sg_rx, sg_tx;
 
 	dspi = spi_master_get_devdata(spi->master);
 	pdata = dspi->pdata;
 	spicfg = (struct davinci_spi_config *)spi->controller_data;
 	if (!spicfg)
 		spicfg = &davinci_spi_default_cfg;
-	sdev = dspi->bitbang.master->dev.parent;
 
 	/* convert len to words based on bits_per_word */
 	data_type = dspi->bytes_per_word[spi->chip_select];
@@ -567,112 +565,83 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 		spidat1 |= tx_data & 0xFFFF;
 		iowrite32(spidat1, dspi->base + SPIDAT1);
 	} else {
-		struct davinci_spi_dma *dma;
-		unsigned long tx_reg, rx_reg;
-		struct edmacc_param param;
-		void *rx_buf;
-		int b, c;
-
-		dma = &dspi->dma;
-
-		tx_reg = (unsigned long)dspi->pbase + SPIDAT1;
-		rx_reg = (unsigned long)dspi->pbase + SPIBUF;
-
-		/*
-		 * Transmit DMA setup
-		 *
-		 * If there is transmit data, map the transmit buffer, set it
-		 * as the source of data and set the source B index to data
-		 * size. If there is no transmit data, set the transmit register
-		 * as the source of data, and set the source B index to zero.
-		 *
-		 * The destination is always the transmit register itself. And
-		 * the destination never increments.
-		 */
-
-		if (t->tx_buf) {
-			t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
-					t->len, DMA_TO_DEVICE);
-			if (dma_mapping_error(&spi->dev, t->tx_dma)) {
-				dev_dbg(sdev, "Unable to DMA map %d bytes"
-						"TX buffer\n", t->len);
-				return -ENOMEM;
-			}
-		}
-
-		/*
-		 * If number of words is greater than 65535, then we need
-		 * to configure a 3 dimension transfer. Use the BCNTRLD
-		 * feature to allow for transfers that aren't even multiples
-		 * of 65535 (or any other possible b size) by first transferring
-		 * the remainder amount then grabbing the next N blocks of
-		 * 65535 words.
-		 */
-
-		c = dspi->wcount / (SZ_64K - 1);	/* N 65535 Blocks */
-		b = dspi->wcount - c * (SZ_64K - 1);	/* Remainder */
-		if (b)
-			c++;
-		else
-			b = SZ_64K - 1;
-
-		param.opt = TCINTEN | EDMA_TCC(dma->tx_channel);
-		param.src = t->tx_buf ? t->tx_dma : tx_reg;
-		param.a_b_cnt = b << 16 | data_type;
-		param.dst = tx_reg;
-		param.src_dst_bidx = t->tx_buf ? data_type : 0;
-		param.link_bcntrld = 0xffffffff;
-		param.src_dst_cidx = t->tx_buf ? data_type : 0;
-		param.ccnt = c;
-		edma_write_slot(dma->tx_channel, &param);
-		edma_link(dma->tx_channel, dma->dummy_param_slot);
-
-		/*
-		 * Receive DMA setup
-		 *
-		 * If there is receive buffer, use it to receive data. If there
-		 * is none provided, use a temporary receive buffer. Set the
-		 * destination B index to 0 so effectively only one byte is used
-		 * in the temporary buffer (address does not increment).
-		 *
-		 * The source of receive data is the receive data register. The
-		 * source address never increments.
-		 */
-
-		if (t->rx_buf) {
-			rx_buf = t->rx_buf;
-			rx_buf_count = t->len;
-		} else {
-			rx_buf = dspi->rx_tmp_buf;
-			rx_buf_count = sizeof(dspi->rx_tmp_buf);
-		}
-
-		t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count,
-				DMA_FROM_DEVICE);
-		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
-			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
-					rx_buf_count);
-			if (t->tx_buf)
-				dma_unmap_single(&spi->dev, t->tx_dma, t->len,
-						DMA_TO_DEVICE);
-			return -ENOMEM;
-		}
-
-		param.opt = TCINTEN | EDMA_TCC(dma->rx_channel);
-		param.src = rx_reg;
-		param.a_b_cnt = b << 16 | data_type;
-		param.dst = t->rx_dma;
-		param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
-		param.link_bcntrld = 0xffffffff;
-		param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16;
-		param.ccnt = c;
-		edma_write_slot(dma->rx_channel, &param);
+		struct dma_slave_config dma_rx_conf = {
+			.direction = DMA_DEV_TO_MEM,
+			.src_addr = (unsigned long)dspi->pbase + SPIBUF,
+			.src_addr_width = data_type,
+			.src_maxburst = 1,
+		};
+		struct dma_slave_config dma_tx_conf = {
+			.direction = DMA_MEM_TO_DEV,
+			.dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
+			.dst_addr_width = data_type,
+			.dst_maxburst = 1,
+		};
+		struct dma_async_tx_descriptor *rxdesc;
+		struct dma_async_tx_descriptor *txdesc;
+		void *buf;
+
+		dummy_buf = kzalloc(t->len, GFP_KERNEL);
+		if (!dummy_buf)
+			goto err_alloc_dummy_buf;
+
+		dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
+		dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);
+
+		sg_init_table(&sg_rx, 1);
+		if (!t->rx_buf)
+			buf = dummy_buf;
+		else
+			buf = t->rx_buf;
+		t->rx_dma = dma_map_single(&spi->dev, buf,
+				t->len, DMA_FROM_DEVICE);
+		if (!t->rx_dma) {
+			ret = -EFAULT;
+			goto err_rx_map;
+		}
+		sg_dma_address(&sg_rx) = t->rx_dma;
+		sg_dma_len(&sg_rx) = t->len;
+
+		sg_init_table(&sg_tx, 1);
+		if (!t->tx_buf)
+			buf = dummy_buf;
+		else
+			buf = (void *)t->tx_buf;
+		t->tx_dma = dma_map_single(&spi->dev, buf,
+				t->len, DMA_FROM_DEVICE);
+		if (!t->tx_dma) {
+			ret = -EFAULT;
+			goto err_tx_map;
+		}
+		sg_dma_address(&sg_tx) = t->tx_dma;
+		sg_dma_len(&sg_tx) = t->len;
+
+		rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
+				&sg_rx, 1, DMA_DEV_TO_MEM,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!rxdesc)
+			goto err_desc;
+
+		txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
+				&sg_tx, 1, DMA_MEM_TO_DEV,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!txdesc)
+			goto err_desc;
+
+		rxdesc->callback = davinci_spi_dma_rx_callback;
+		rxdesc->callback_param = (void *)dspi;
+		txdesc->callback = davinci_spi_dma_tx_callback;
+		txdesc->callback_param = (void *)dspi;
 
 		if (pdata->cshold_bug)
 			iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
 
-		edma_start(dma->rx_channel);
-		edma_start(dma->tx_channel);
+		dmaengine_submit(rxdesc);
+		dmaengine_submit(txdesc);
+
+		dma_async_issue_pending(dspi->dma_rx);
+		dma_async_issue_pending(dspi->dma_tx);
+
 		set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
 	}
 
@@ -690,15 +659,13 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 
 	clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
 	if (spicfg->io_type == SPI_IO_TYPE_DMA) {
-
-		if (t->tx_buf)
-			dma_unmap_single(&spi->dev, t->tx_dma, t->len,
-					DMA_TO_DEVICE);
-
-		dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count,
-				DMA_FROM_DEVICE);
-
 		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
+
+		dma_unmap_single(&spi->dev, t->rx_dma,
+				t->len, DMA_FROM_DEVICE);
+		dma_unmap_single(&spi->dev, t->tx_dma,
+				t->len, DMA_TO_DEVICE);
+		kfree(dummy_buf);
 	}
 
 	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
@@ -716,11 +683,20 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 	}
 
 	if (dspi->rcount != 0 || dspi->wcount != 0) {
-		dev_err(sdev, "SPI data transfer error\n");
+		dev_err(&spi->dev, "SPI data transfer error\n");
 		return -EIO;
 	}
 
 	return t->len;
+
+err_desc:
+	dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
+err_tx_map:
+	dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
+err_rx_map:
+	kfree(dummy_buf);
+err_alloc_dummy_buf:
+	return ret;
 }
 
 /**
@@ -751,39 +727,33 @@ static irqreturn_t davinci_spi_irq(s32 irq, void *data)
 
 static int davinci_spi_request_dma(struct davinci_spi *dspi)
 {
+	dma_cap_mask_t mask;
+	struct device *sdev = dspi->bitbang.master->dev.parent;
 	int r;
-	struct davinci_spi_dma *dma = &dspi->dma;
 
-	r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi,
-				dma->eventq);
-	if (r < 0) {
-		pr_err("Unable to request DMA channel for SPI RX\n");
-		r = -EAGAIN;
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
+					&dspi->dma_rx_chnum);
+	if (!dspi->dma_rx) {
+		dev_err(sdev, "request RX DMA channel failed\n");
+		r = -ENODEV;
 		goto rx_dma_failed;
 	}
 
-	r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi,
-				dma->eventq);
-	if (r < 0) {
-		pr_err("Unable to request DMA channel for SPI TX\n");
-		r = -EAGAIN;
+	dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
+					&dspi->dma_tx_chnum);
+	if (!dspi->dma_tx) {
+		dev_err(sdev, "request TX DMA channel failed\n");
+		r = -ENODEV;
 		goto tx_dma_failed;
 	}
 
-	r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY);
-	if (r < 0) {
-		pr_err("Unable to request SPI TX DMA param slot\n");
-		r = -EAGAIN;
-		goto param_failed;
-	}
-	dma->dummy_param_slot = r;
-	edma_link(dma->dummy_param_slot, dma->dummy_param_slot);
-
 	return 0;
-param_failed:
-	edma_free_channel(dma->tx_channel);
+
 tx_dma_failed:
-	edma_free_channel(dma->rx_channel);
+	dma_release_channel(dspi->dma_rx);
 rx_dma_failed:
 	return r;
 }
@@ -898,9 +868,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
 	dspi->bitbang.txrx_bufs = davinci_spi_bufs;
 	if (dma_rx_chan != SPI_NO_RESOURCE &&
 	    dma_tx_chan != SPI_NO_RESOURCE) {
-		dspi->dma.rx_channel = dma_rx_chan;
-		dspi->dma.tx_channel = dma_tx_chan;
-		dspi->dma.eventq = pdata->dma_event_q;
+		dspi->dma_rx_chnum = dma_rx_chan;
+		dspi->dma_tx_chnum = dma_tx_chan;
 
 		ret = davinci_spi_request_dma(dspi);
 		if (ret)
@@ -955,9 +924,8 @@ static int __devinit davinci_spi_probe(struct platform_device *pdev)
 	return ret;
 
 free_dma:
-	edma_free_channel(dspi->dma.tx_channel);
-	edma_free_channel(dspi->dma.rx_channel);
-	edma_free_slot(dspi->dma.dummy_param_slot);
+	dma_release_channel(dspi->dma_rx);
+	dma_release_channel(dspi->dma_tx);
 free_clk:
 	clk_disable(dspi->clk);
 	clk_put(dspi->clk);
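
The spi-davinci hunks above move the driver from the private EDMA calls (edma_write_slot(), edma_start()) to the generic dmaengine slave API: configure the channel, describe the buffer with a one-entry scatterlist, prep a slave descriptor with a completion callback, submit it, and issue pending work. A minimal sketch of that sequence for a single RX transfer follows; it is not part of the patch, and my_setup_rx_dma(), fifo_phys and done_cb are illustrative names.

    /*
     * Minimal sketch of the dmaengine slave flow adopted above, for one
     * RX transfer.  Names are illustrative, not taken from the patch.
     */
    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    static int my_setup_rx_dma(struct device *dev, struct dma_chan *chan,
                               dma_addr_t fifo_phys, void *buf, size_t len,
                               dma_async_tx_callback done_cb, void *cb_param)
    {
        struct dma_slave_config cfg = {
            .direction      = DMA_DEV_TO_MEM,            /* peripheral -> memory */
            .src_addr       = fifo_phys,                 /* device FIFO address */
            .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
            .src_maxburst   = 1,
        };
        struct dma_async_tx_descriptor *desc;
        struct scatterlist sg;
        dma_addr_t dma_addr;

        dmaengine_slave_config(chan, &cfg);

        /* Map the destination buffer and describe it with a one-entry sglist. */
        dma_addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma_addr))
            return -ENOMEM;
        sg_init_table(&sg, 1);
        sg_dma_address(&sg) = dma_addr;
        sg_dma_len(&sg) = len;

        desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
            dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);
            return -ENOMEM;
        }

        /* Completion is signalled through the callback, as in the patch. */
        desc->callback = done_cb;
        desc->callback_param = cb_param;

        dmaengine_submit(desc);             /* queue the descriptor */
        dma_async_issue_pending(chan);      /* start the channel */
        return 0;
    }
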
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index edf1360ab09e..86dd04d6bc87 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -323,6 +323,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
 	if (!ret) {
 		dev_err(ssp->dev, "DMA transfer timeout\n");
 		ret = -ETIMEDOUT;
+		dmaengine_terminate_all(ssp->dmach);
 		goto err_vmalloc;
 	}
 
@@ -480,7 +481,7 @@ static int mxs_spi_transfer_one(struct spi_master *master,
 		first = last = 0;
 	}
 
-	m->status = 0;
+	m->status = status;
 	spi_finalize_current_message(master);
 
 	return status;
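
The first spi-mxs hunk terminates the channel when the DMA completion times out, so stale descriptors do not outlive the failed transfer; the second propagates the real transfer status into m->status instead of hard-coding 0. A small sketch of the timeout pattern, with illustrative names (my_wait_dma_done(), a one-second timeout) that are not part of the patch:

    #include <linux/completion.h>
    #include <linux/dmaengine.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static int my_wait_dma_done(struct dma_chan *chan, struct completion *done)
    {
        if (!wait_for_completion_timeout(done, msecs_to_jiffies(1000))) {
            /* Drop everything still queued on this channel before unwinding. */
            dmaengine_terminate_all(chan);
            return -ETIMEDOUT;
        }
        return 0;
    }
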
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 919464102d33..a1db91a99b89 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2186,8 +2186,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 	printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
 	       adev->res.start, pl022->virtbase);
 
-	pm_runtime_resume(dev);
-
 	pl022->clk = devm_clk_get(&adev->dev, NULL);
 	if (IS_ERR(pl022->clk)) {
 		status = PTR_ERR(pl022->clk);
@@ -2292,7 +2290,6 @@ pl022_remove(struct amba_device *adev)
 
 	clk_disable(pl022->clk);
 	clk_unprepare(pl022->clk);
-	pm_runtime_disable(&adev->dev);
 	amba_release_regions(adev);
 	tasklet_disable(&pl022->pump_transfers);
 	spi_unregister_master(pl022->master);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 4894bde4bbff..30faf6d4ab91 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -147,8 +147,6 @@ struct rspi_data {
 	unsigned char spsr;
 
 	/* for dmaengine */
-	struct sh_dmae_slave dma_tx;
-	struct sh_dmae_slave dma_rx;
 	struct dma_chan *chan_tx;
 	struct dma_chan *chan_rx;
 	int irq;
@@ -663,20 +661,16 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
 	return ret;
 }
 
-static bool rspi_filter(struct dma_chan *chan, void *filter_param)
-{
-	chan->private = filter_param;
-	return true;
-}
-
-static void __devinit rspi_request_dma(struct rspi_data *rspi,
-				       struct platform_device *pdev)
+static int __devinit rspi_request_dma(struct rspi_data *rspi,
+				      struct platform_device *pdev)
 {
 	struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
 	dma_cap_mask_t mask;
+	struct dma_slave_config cfg;
+	int ret;
 
 	if (!rspi_pd)
-		return;
+		return 0;	/* The driver assumes no error. */
 
 	rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
 
@@ -684,21 +678,35 @@ static void __devinit rspi_request_dma(struct rspi_data *rspi,
 	if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
 		dma_cap_zero(mask);
 		dma_cap_set(DMA_SLAVE, mask);
-		rspi->dma_rx.slave_id = rspi_pd->dma_rx_id;
-		rspi->chan_rx = dma_request_channel(mask, rspi_filter,
-						    &rspi->dma_rx);
-		if (rspi->chan_rx)
-			dev_info(&pdev->dev, "Use DMA when rx.\n");
+		rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
+						    (void *)rspi_pd->dma_rx_id);
+		if (rspi->chan_rx) {
+			cfg.slave_id = rspi_pd->dma_rx_id;
+			cfg.direction = DMA_DEV_TO_MEM;
+			ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
+			if (!ret)
+				dev_info(&pdev->dev, "Use DMA when rx.\n");
+			else
+				return ret;
+		}
 	}
 	if (rspi_pd->dma_tx_id) {
 		dma_cap_zero(mask);
 		dma_cap_set(DMA_SLAVE, mask);
-		rspi->dma_tx.slave_id = rspi_pd->dma_tx_id;
-		rspi->chan_tx = dma_request_channel(mask, rspi_filter,
-						    &rspi->dma_tx);
-		if (rspi->chan_tx)
-			dev_info(&pdev->dev, "Use DMA when tx\n");
+		rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
+						    (void *)rspi_pd->dma_tx_id);
+		if (rspi->chan_tx) {
+			cfg.slave_id = rspi_pd->dma_tx_id;
+			cfg.direction = DMA_MEM_TO_DEV;
+			ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
+			if (!ret)
+				dev_info(&pdev->dev, "Use DMA when tx\n");
+			else
+				return ret;
+		}
 	}
+
+	return 0;
 }
 
 static void __devexit rspi_release_dma(struct rspi_data *rspi)
@@ -788,7 +796,11 @@ static int __devinit rspi_probe(struct platform_device *pdev)
 	}
 
 	rspi->irq = irq;
-	rspi_request_dma(rspi, pdev);
+	ret = rspi_request_dma(rspi, pdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "rspi_request_dma failed.\n");
+		goto error4;
+	}
 
 	ret = spi_register_master(master);
 	if (ret < 0) {
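
The spi-rspi change drops the driver-private filter that poked chan->private and instead requests a DMA_SLAVE channel through shdma_chan_filter(), then binds it to the peripheral with dmaengine_slave_config(); rspi_request_dma() now returns an error that probe checks. A sketch of that request pattern, assuming a hypothetical helper (my_request_rx_chan(), rx_id) and the slave_id field that struct dma_slave_config carried in this era:

    #include <linux/dmaengine.h>
    #include <linux/shdma-base.h>

    static struct dma_chan *my_request_rx_chan(unsigned int rx_id)
    {
        struct dma_slave_config cfg = {
            .slave_id  = rx_id,
            .direction = DMA_DEV_TO_MEM,
        };
        dma_cap_mask_t mask;
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* The shdma filter matches the channel against the slave id. */
        chan = dma_request_channel(mask, shdma_chan_filter,
                                   (void *)(unsigned long)rx_id);
        if (!chan)
            return NULL;

        /* Propagate configuration errors instead of ignoring them. */
        if (dmaengine_slave_config(chan, &cfg)) {
            dma_release_channel(chan);
            return NULL;
        }
        return chan;
    }
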
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 1a81c90a4a71..6e7a805d324d 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -132,7 +132,7 @@
 
 struct s3c64xx_spi_dma_data {
 	unsigned		ch;
-	enum dma_data_direction direction;
+	enum dma_transfer_direction direction;
 	enum dma_ch	dmach;
 	struct property		*dma_prop;
 };
@@ -1067,11 +1067,11 @@ static int __devinit s3c64xx_spi_get_dmares(
 
 	if (tx) {
 		dma_data = &sdd->tx_dma;
-		dma_data->direction = DMA_TO_DEVICE;
+		dma_data->direction = DMA_MEM_TO_DEV;
 		chan_str = "tx";
 	} else {
 		dma_data = &sdd->rx_dma;
-		dma_data->direction = DMA_FROM_DEVICE;
+		dma_data->direction = DMA_DEV_TO_MEM;
 		chan_str = "rx";
 	}
 
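
The spi-s3c64xx hunks switch the per-channel direction from enum dma_data_direction (DMA_TO_DEVICE/DMA_FROM_DEVICE), which belongs to the DMA mapping API, to enum dma_transfer_direction (DMA_MEM_TO_DEV/DMA_DEV_TO_MEM), which is what dmaengine slave transfers expect. A tiny sketch of the distinction, with illustrative names (my_show_directions(), dev, chan, buf) that are not part of the patch:

    #include <linux/dma-mapping.h>
    #include <linux/dmaengine.h>

    static void my_show_directions(struct device *dev, struct dma_chan *chan,
                                   void *buf, size_t len)
    {
        struct dma_slave_config cfg = {
            /* dmaengine direction: memory -> peripheral */
            .direction = DMA_MEM_TO_DEV,
        };
        /* mapping direction: CPU fills the buffer, the device reads it */
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, addr))
            return;

        dmaengine_slave_config(chan, &cfg);
        dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
    }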