path: root/drivers/spi
author		Boojin Kim <boojin.kim@samsung.com>	2011-09-01 20:44:41 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2011-09-14 01:40:03 -0400
commit		39d3e8074e44c1953928a0b91bc328f552c5fc79 (patch)
tree		b7cf213415d43309a71e4366d5a9a6da93d0d8ee /drivers/spi
parent		978ce50dd5cfd93380ded89d61de9d8109ebd814 (diff)
spi/s3c64xx: Add support for DMA engine API
This patch adds support for the generic DMA engine API for transferring raw SPI data. The SPI driver uses the generic DMA API if the architecture supports it; otherwise, it falls back to the Samsung-specific S3C-PL330 API.

Signed-off-by: Boojin Kim <boojin.kim@samsung.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Vinod Koul <vinod.koul@intel.com>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Signed-off-by: Kukjin Kim <kgene.kim@samsung.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
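For readers skimming the diff below, the new call flow can be condensed into a minimal sketch. It only rearranges calls and structures that appear in the hunks themselves (samsung_dma_get_ops(), ops->request()/prepare()/trigger(), struct samsung_dma_info and struct samsung_dma_prep_info); the wrapper function name and its placement are illustrative, not part of the patch.

/*
 * Sketch of the samsung_dma_ops flow this patch adopts for the RX
 * path; the TX path is symmetric.  The helper name is hypothetical.
 */
static void sketch_spi_rx_dma(struct s3c64xx_spi_driver_data *sdd,
			      struct spi_transfer *xfer)
{
	struct samsung_dma_info info;
	struct samsung_dma_prep_info prep;

	/* Pick the DMA backend: generic engine, or S3C-PL330 otherwise */
	sdd->ops = samsung_dma_get_ops();

	/* Acquire the RX channel and point it at the SPI RX FIFO */
	info.cap = DMA_SLAVE;
	info.client = &s3c64xx_spi_dma_client;
	info.direction = DMA_FROM_DEVICE;
	info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
	info.width = sdd->cur_bpw / 8;
	sdd->rx_ch = sdd->ops->request(sdd->rx_dmach, &info);

	/* Queue one buffer and start it; completion runs s3c64xx_spi_dma_rxcb() */
	prep.cap = DMA_SLAVE;
	prep.direction = DMA_FROM_DEVICE;
	prep.buf = xfer->rx_dma;
	prep.len = xfer->len;
	prep.fp = s3c64xx_spi_dma_rxcb;
	prep.fp_param = sdd;
	sdd->ops->prepare(sdd->rx_ch, &prep);
	sdd->ops->trigger(sdd->rx_ch);
}

Teardown follows the same ops table: ops->stop() flushes an active channel in handle_msg() and ops->release() frees both channels in s3c64xx_spi_work(), as the corresponding hunks show.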
Diffstat (limited to 'drivers/spi')
-rw-r--r--	drivers/spi/spi-s3c64xx.c	141
1 file changed, 69 insertions(+), 72 deletions(-)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 595dacc7645f..24f49032ec35 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -171,6 +171,9 @@ struct s3c64xx_spi_driver_data {
 	unsigned state;
 	unsigned cur_mode, cur_bpw;
 	unsigned cur_speed;
+	unsigned rx_ch;
+	unsigned tx_ch;
+	struct samsung_dma_ops *ops;
 };
 
 static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
@@ -226,6 +229,38 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
 	writel(val, regs + S3C64XX_SPI_CH_CFG);
 }
 
+static void s3c64xx_spi_dma_rxcb(void *data)
+{
+	struct s3c64xx_spi_driver_data *sdd
+			= (struct s3c64xx_spi_driver_data *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sdd->lock, flags);
+
+	sdd->state &= ~RXBUSY;
+	/* If the other done */
+	if (!(sdd->state & TXBUSY))
+		complete(&sdd->xfer_completion);
+
+	spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+static void s3c64xx_spi_dma_txcb(void *data)
+{
+	struct s3c64xx_spi_driver_data *sdd
+			= (struct s3c64xx_spi_driver_data *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sdd->lock, flags);
+
+	sdd->state &= ~TXBUSY;
+	/* If the other done */
+	if (!(sdd->state & RXBUSY))
+		complete(&sdd->xfer_completion);
+
+	spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 				struct spi_device *spi,
 				struct spi_transfer *xfer, int dma_mode)
@@ -233,6 +268,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 	void __iomem *regs = sdd->regs;
 	u32 modecfg, chcfg;
+	struct samsung_dma_prep_info info;
 
 	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
 	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
@@ -258,10 +294,14 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
-			s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8);
-			s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
-						xfer->tx_dma, xfer->len);
-			s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
+			info.cap = DMA_SLAVE;
+			info.direction = DMA_TO_DEVICE;
+			info.buf = xfer->tx_dma;
+			info.len = xfer->len;
+			info.fp = s3c64xx_spi_dma_txcb;
+			info.fp_param = sdd;
+			sdd->ops->prepare(sdd->tx_ch, &info);
+			sdd->ops->trigger(sdd->tx_ch);
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -293,10 +333,14 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
-			s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8);
-			s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
-						xfer->rx_dma, xfer->len);
-			s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
+			info.cap = DMA_SLAVE;
+			info.direction = DMA_FROM_DEVICE;
+			info.buf = xfer->rx_dma;
+			info.len = xfer->len;
+			info.fp = s3c64xx_spi_dma_rxcb;
+			info.fp_param = sdd;
+			sdd->ops->prepare(sdd->rx_ch, &info);
+			sdd->ops->trigger(sdd->rx_ch);
 		}
 	}
 
@@ -482,46 +526,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 	}
 }
 
-static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
-				 int size, enum s3c2410_dma_buffresult res)
-{
-	struct s3c64xx_spi_driver_data *sdd = buf_id;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdd->lock, flags);
-
-	if (res == S3C2410_RES_OK)
-		sdd->state &= ~RXBUSY;
-	else
-		dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
-
-	/* If the other done */
-	if (!(sdd->state & TXBUSY))
-		complete(&sdd->xfer_completion);
-
-	spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
-static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
-				 int size, enum s3c2410_dma_buffresult res)
-{
-	struct s3c64xx_spi_driver_data *sdd = buf_id;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdd->lock, flags);
-
-	if (res == S3C2410_RES_OK)
-		sdd->state &= ~TXBUSY;
-	else
-		dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
-
-	/* If the other done */
-	if (!(sdd->state & RXBUSY))
-		complete(&sdd->xfer_completion);
-
-	spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
 
 static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
@@ -696,12 +700,10 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
 			if (use_dma) {
 				if (xfer->tx_buf != NULL
 						&& (sdd->state & TXBUSY))
-					s3c2410_dma_ctrl(sdd->tx_dmach,
-							S3C2410_DMAOP_FLUSH);
+					sdd->ops->stop(sdd->tx_ch);
 				if (xfer->rx_buf != NULL
 						&& (sdd->state & RXBUSY))
-					s3c2410_dma_ctrl(sdd->rx_dmach,
-							S3C2410_DMAOP_FLUSH);
+					sdd->ops->stop(sdd->rx_ch);
 			}
 
 			goto out;
@@ -741,24 +743,19 @@ out:
 
 static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
 {
-	if (s3c2410_dma_request(sdd->rx_dmach,
-					&s3c64xx_spi_dma_client, NULL) < 0) {
-		dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
-		return 0;
-	}
-	s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
-	s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
-				sdd->sfr_start + S3C64XX_SPI_RX_DATA);
-
-	if (s3c2410_dma_request(sdd->tx_dmach,
-					&s3c64xx_spi_dma_client, NULL) < 0) {
-		dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
-		s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
-		return 0;
-	}
-	s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
-	s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
-				sdd->sfr_start + S3C64XX_SPI_TX_DATA);
+
+	struct samsung_dma_info info;
+	sdd->ops = samsung_dma_get_ops();
+
+	info.cap = DMA_SLAVE;
+	info.client = &s3c64xx_spi_dma_client;
+	info.direction = DMA_FROM_DEVICE;
+	info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+	info.width = sdd->cur_bpw / 8;
+	sdd->rx_ch = sdd->ops->request(sdd->rx_dmach, &info);
+	info.direction = DMA_TO_DEVICE;
+	info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+	sdd->tx_ch = sdd->ops->request(sdd->tx_dmach, &info);
 
 	return 1;
 }
@@ -799,8 +796,8 @@ static void s3c64xx_spi_work(struct work_struct *work)
 	spin_unlock_irqrestore(&sdd->lock, flags);
 
 	/* Free DMA channels */
-	s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
-	s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
+	sdd->ops->release(sdd->rx_ch, &s3c64xx_spi_dma_client);
+	sdd->ops->release(sdd->tx_ch, &s3c64xx_spi_dma_client);
 }
 
 static int s3c64xx_spi_transfer(struct spi_device *spi,