Diffstat (limited to 'drivers/spi/spi-s3c64xx.c')
-rw-r--r--	drivers/spi/spi-s3c64xx.c | 424
1 file changed, 105 insertions(+), 319 deletions(-)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index ae907dde1371..f19cd97855e8 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -34,10 +34,6 @@
 
 #include <linux/platform_data/spi-s3c64xx.h>
 
-#ifdef CONFIG_S3C_DMA
-#include <mach/dma.h>
-#endif
-
 #define MAX_SPI_PORTS		3
 #define S3C64XX_SPI_QUIRK_POLL		(1 << 0)
 
@@ -200,9 +196,6 @@ struct s3c64xx_spi_driver_data {
 	unsigned			cur_speed;
 	struct s3c64xx_spi_dma_data	rx_dma;
 	struct s3c64xx_spi_dma_data	tx_dma;
-#ifdef CONFIG_S3C_DMA
-	struct samsung_dma_ops		*ops;
-#endif
 	struct s3c64xx_spi_port_config	*port_conf;
 	unsigned int			port_id;
 	bool				cs_gpio;
@@ -284,104 +277,8 @@ static void s3c64xx_spi_dmacb(void *data)
 	spin_unlock_irqrestore(&sdd->lock, flags);
 }
 
-#ifdef CONFIG_S3C_DMA
-/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */
-
-static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
-	.name = "samsung-spi-dma",
-};
-
-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-			unsigned len, dma_addr_t buf)
-{
-	struct s3c64xx_spi_driver_data *sdd;
-	struct samsung_dma_prep info;
-	struct samsung_dma_config config;
-
-	if (dma->direction == DMA_DEV_TO_MEM) {
-		sdd = container_of((void *)dma,
-			struct s3c64xx_spi_driver_data, rx_dma);
-		config.direction = sdd->rx_dma.direction;
-		config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
-		config.width = sdd->cur_bpw / 8;
-		sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
-	} else {
-		sdd = container_of((void *)dma,
-			struct s3c64xx_spi_driver_data, tx_dma);
-		config.direction = sdd->tx_dma.direction;
-		config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
-		config.width = sdd->cur_bpw / 8;
-		sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
-	}
-
-	info.cap = DMA_SLAVE;
-	info.len = len;
-	info.fp = s3c64xx_spi_dmacb;
-	info.fp_param = dma;
-	info.direction = dma->direction;
-	info.buf = buf;
-
-	sdd->ops->prepare((enum dma_ch)dma->ch, &info);
-	sdd->ops->trigger((enum dma_ch)dma->ch);
-}
-
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
-{
-	struct samsung_dma_req req;
-	struct device *dev = &sdd->pdev->dev;
-
-	sdd->ops = samsung_dma_get_ops();
-
-	req.cap = DMA_SLAVE;
-	req.client = &s3c64xx_spi_dma_client;
-
-	sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
-					sdd->rx_dma.dmach, &req, dev, "rx");
-	sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
-					sdd->tx_dma.dmach, &req, dev, "tx");
-
-	return 1;
-}
-
-static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
-{
-	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
-	/*
-	 * If DMA resource was not available during
-	 * probe, no need to continue with dma requests
-	 * else Acquire DMA channels
-	 */
-	while (!is_polling(sdd) && !acquire_dma(sdd))
-		usleep_range(10000, 11000);
-
-	return 0;
-}
-
-static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
-{
-	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
-	/* Free DMA channels */
-	if (!is_polling(sdd)) {
-		sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
-					&s3c64xx_spi_dma_client);
-		sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
-					&s3c64xx_spi_dma_client);
-	}
-
-	return 0;
-}
-
-static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
-				 struct s3c64xx_spi_dma_data *dma)
-{
-	sdd->ops->stop((enum dma_ch)dma->ch);
-}
-#else
-
 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-			unsigned len, dma_addr_t buf)
+			struct sg_table *sgt)
 {
 	struct s3c64xx_spi_driver_data *sdd;
 	struct dma_slave_config config;
@@ -407,8 +304,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 		dmaengine_slave_config(dma->ch, &config);
 	}
 
-	desc = dmaengine_prep_slave_single(dma->ch, buf, len,
+	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
 					dma->direction, DMA_PREP_INTERRUPT);
 
 	desc->callback = s3c64xx_spi_dmacb;
 	desc->callback_param = dma;
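The hunk above moves descriptor setup from dmaengine_prep_slave_single() to dmaengine_prep_slave_sg(), so the driver consumes the scatter/gather tables the SPI core now maps for it instead of a single pre-mapped buffer. For orientation, a minimal sketch of the generic dmaengine slave flow this relies on (illustrative only, not code from this patch; sketch_submit_sg is a hypothetical helper):

    static int sketch_submit_sg(struct dma_chan *ch, struct sg_table *sgt,
                                enum dma_transfer_direction dir,
                                dma_async_tx_callback cb, void *cb_param)
    {
            struct dma_async_tx_descriptor *desc;

            /* one descriptor covering the whole sg list */
            desc = dmaengine_prep_slave_sg(ch, sgt->sgl, sgt->nents, dir,
                                           DMA_PREP_INTERRUPT);
            if (!desc)
                    return -ENOMEM;

            desc->callback = cb;             /* e.g. s3c64xx_spi_dmacb */
            desc->callback_param = cb_param;

            dmaengine_submit(desc);          /* queue the descriptor */
            dma_async_issue_pending(ch);     /* kick the channel */
            return 0;
    }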
@@ -437,6 +334,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 		ret = -EBUSY;
 		goto out;
 	}
+	spi->dma_rx = sdd->rx_dma.ch;
 
 	sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
 				   (void *)sdd->tx_dma.dmach, dev, "tx");
@@ -445,6 +343,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 			ret = -EBUSY;
 			goto out_rx;
 		}
+		spi->dma_tx = sdd->tx_dma.ch;
 	}
 
 	ret = pm_runtime_get_sync(&sdd->pdev->dev);
@@ -477,12 +376,14 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
 	return 0;
 }
 
-static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
-			struct s3c64xx_spi_dma_data *dma)
+static bool s3c64xx_spi_can_dma(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *xfer)
 {
-	dmaengine_terminate_all(dma->ch);
+	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
 }
-#endif
 
 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 					struct spi_device *spi,
@@ -515,7 +416,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
-			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -547,7 +448,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
-			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
 		}
 	}
 
@@ -555,23 +456,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
 }
 
-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_device *spi)
-{
-	if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
-		if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
-			/* Deselect the last toggled device */
-			if (spi->cs_gpio >= 0)
-				gpio_set_value(spi->cs_gpio,
-					spi->mode & SPI_CS_HIGH ? 0 : 1);
-		}
-		sdd->tgl_spi = NULL;
-	}
-
-	if (spi->cs_gpio >= 0)
-		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
-}
-
 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 					int timeout_ms)
 {
@@ -593,112 +477,111 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 	return RX_FIFO_LVL(status, sdd);
 }
 
-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
-			struct spi_transfer *xfer, int dma_mode)
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
 {
 	void __iomem *regs = sdd->regs;
 	unsigned long val;
+	u32 status;
 	int ms;
 
 	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
 	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
 	ms += 10; /* some tolerance */
 
-	if (dma_mode) {
-		val = msecs_to_jiffies(ms) + 10;
-		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
-	} else {
-		u32 status;
-		val = msecs_to_loops(ms);
-		do {
+	val = msecs_to_jiffies(ms) + 10;
+	val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+	/*
+	 * If the previous xfer was completed within timeout, then
+	 * proceed further else return -EIO.
+	 * DmaTx returns after simply writing data in the FIFO,
+	 * w/o waiting for real transmission on the bus to finish.
+	 * DmaRx returns only after Dma read data from FIFO which
+	 * needs bus transmission to finish, so we don't worry if
+	 * Xfer involved Rx(with or without Tx).
+	 */
+	if (val && !xfer->rx_buf) {
+		val = msecs_to_loops(10);
+		status = readl(regs + S3C64XX_SPI_STATUS);
+		while ((TX_FIFO_LVL(status, sdd)
+			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+		       && --val) {
+			cpu_relax();
 			status = readl(regs + S3C64XX_SPI_STATUS);
-		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+		}
+
 	}
 
-	if (dma_mode) {
-		u32 status;
-
-		/*
-		 * If the previous xfer was completed within timeout, then
-		 * proceed further else return -EIO.
-		 * DmaTx returns after simply writing data in the FIFO,
-		 * w/o waiting for real transmission on the bus to finish.
-		 * DmaRx returns only after Dma read data from FIFO which
-		 * needs bus transmission to finish, so we don't worry if
-		 * Xfer involved Rx(with or without Tx).
-		 */
-		if (val && !xfer->rx_buf) {
-			val = msecs_to_loops(10);
-			status = readl(regs + S3C64XX_SPI_STATUS);
-			while ((TX_FIFO_LVL(status, sdd)
-				|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
-			       && --val) {
-				cpu_relax();
-				status = readl(regs + S3C64XX_SPI_STATUS);
-			}
+	/* If timed out while checking rx/tx status return error */
+	if (!val)
+		return -EIO;
 
-		}
+	return 0;
+}
 
-		/* If timed out while checking rx/tx status return error */
-		if (!val)
-			return -EIO;
-	} else {
-		int loops;
-		u32 cpy_len;
-		u8 *buf;
-
-		/* If it was only Tx */
-		if (!xfer->rx_buf) {
-			sdd->state &= ~TXBUSY;
-			return 0;
-		}
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
+{
+	void __iomem *regs = sdd->regs;
+	unsigned long val;
+	u32 status;
+	int loops;
+	u32 cpy_len;
+	u8 *buf;
+	int ms;
 
-		/*
-		 * If the receive length is bigger than the controller fifo
-		 * size, calculate the loops and read the fifo as many times.
-		 * loops = length / max fifo size (calculated by using the
-		 * fifo mask).
-		 * For any size less than the fifo size the below code is
-		 * executed atleast once.
-		 */
-		loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
-		buf = xfer->rx_buf;
-		do {
-			/* wait for data to be received in the fifo */
-			cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
-						(loops ? ms : 0));
+	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
+	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+	ms += 10; /* some tolerance */
 
-			switch (sdd->cur_bpw) {
-			case 32:
-				ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 4);
-				break;
-			case 16:
-				ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 2);
-				break;
-			default:
-				ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len);
-				break;
-			}
+	val = msecs_to_loops(ms);
+	do {
+		status = readl(regs + S3C64XX_SPI_STATUS);
+	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
 
-			buf = buf + cpy_len;
-		} while (loops--);
-		sdd->state &= ~RXBUSY;
+
+	/* If it was only Tx */
+	if (!xfer->rx_buf) {
+		sdd->state &= ~TXBUSY;
+		return 0;
 	}
 
-	return 0;
-}
+	/*
+	 * If the receive length is bigger than the controller fifo
+	 * size, calculate the loops and read the fifo as many times.
+	 * loops = length / max fifo size (calculated by using the
+	 * fifo mask).
+	 * For any size less than the fifo size the below code is
+	 * executed atleast once.
+	 */
+	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+	buf = xfer->rx_buf;
+	do {
+		/* wait for data to be received in the fifo */
+		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+					(loops ? ms : 0));
+
+		switch (sdd->cur_bpw) {
+		case 32:
+			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len / 4);
+			break;
+		case 16:
+			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len / 2);
+			break;
+		default:
+			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+				buf, cpy_len);
+			break;
+		}
 
-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_device *spi)
-{
-	if (sdd->tgl_spi == spi)
-		sdd->tgl_spi = NULL;
+		buf = buf + cpy_len;
+	} while (loops--);
+	sdd->state &= ~RXBUSY;
 
-	if (spi->cs_gpio >= 0)
-		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+	return 0;
 }
 
 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -774,81 +657,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 
 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
 
-static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_message *msg)
-{
-	struct device *dev = &sdd->pdev->dev;
-	struct spi_transfer *xfer;
-
-	if (is_polling(sdd) || msg->is_dma_mapped)
-		return 0;
-
-	/* First mark all xfer unmapped */
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		xfer->rx_dma = XFER_DMAADDR_INVALID;
-		xfer->tx_dma = XFER_DMAADDR_INVALID;
-	}
-
-	/* Map until end or first fail */
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
-		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
-			continue;
-
-		if (xfer->tx_buf != NULL) {
-			xfer->tx_dma = dma_map_single(dev,
-					(void *)xfer->tx_buf, xfer->len,
-					DMA_TO_DEVICE);
-			if (dma_mapping_error(dev, xfer->tx_dma)) {
-				dev_err(dev, "dma_map_single Tx failed\n");
-				xfer->tx_dma = XFER_DMAADDR_INVALID;
-				return -ENOMEM;
-			}
-		}
-
-		if (xfer->rx_buf != NULL) {
-			xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
-						xfer->len, DMA_FROM_DEVICE);
-			if (dma_mapping_error(dev, xfer->rx_dma)) {
-				dev_err(dev, "dma_map_single Rx failed\n");
-				dma_unmap_single(dev, xfer->tx_dma,
-						xfer->len, DMA_TO_DEVICE);
-				xfer->tx_dma = XFER_DMAADDR_INVALID;
-				xfer->rx_dma = XFER_DMAADDR_INVALID;
-				return -ENOMEM;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
-						struct spi_message *msg)
-{
-	struct device *dev = &sdd->pdev->dev;
-	struct spi_transfer *xfer;
-
-	if (is_polling(sdd) || msg->is_dma_mapped)
-		return;
-
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
-		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
-			continue;
-
-		if (xfer->rx_buf != NULL
-				&& xfer->rx_dma != XFER_DMAADDR_INVALID)
-			dma_unmap_single(dev, xfer->rx_dma,
-					xfer->len, DMA_FROM_DEVICE);
-
-		if (xfer->tx_buf != NULL
-				&& xfer->tx_dma != XFER_DMAADDR_INVALID)
-			dma_unmap_single(dev, xfer->tx_dma,
-					xfer->len, DMA_TO_DEVICE);
-	}
-}
-
 static int s3c64xx_spi_prepare_message(struct spi_master *master,
 				       struct spi_message *msg)
 {
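Deleting s3c64xx_spi_map_mssg()/s3c64xx_spi_unmap_mssg() drops roughly 75 lines of per-driver dma_map_single() bookkeeping: with master->can_dma set, the SPI core maps each qualifying transfer into sg tables before transfer_one() and unmaps them afterwards. The contract left for clients is the usual one, namely that buffers must be DMA-safe. A hedged client-side sketch (hypothetical device, assumed length):

    u8 *tx = kmalloc(256, GFP_KERNEL);  /* heap memory is DMA-capable;
                                           on-stack buffers are not */
    if (tx) {
            struct spi_transfer t = {
                    .tx_buf = tx,
                    .len    = 256,  /* above the half-FIFO threshold => DMA */
            };

            spi_sync_transfer(spi, &t, 1);
            kfree(tx);
    }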
@@ -866,13 +674,6 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master,
 		s3c64xx_spi_config(sdd);
 	}
 
-	/* Map all the transfers if needed */
-	if (s3c64xx_spi_map_mssg(sdd, msg)) {
-		dev_err(&spi->dev,
-			"Xfer: Unable to map message buffers!\n");
-		return -ENOMEM;
-	}
-
 	/* Configure feedback delay */
 	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
 
@@ -896,13 +697,6 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 	bpw = xfer->bits_per_word;
 	speed = xfer->speed_hz ? : spi->max_speed_hz;
 
-	if (xfer->len % (bpw / 8)) {
-		dev_err(&spi->dev,
-			"Xfer length(%u) not a multiple of word size(%u)\n",
-			xfer->len, bpw / 8);
-		return -EIO;
-	}
-
 	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
 		sdd->cur_bpw = bpw;
 		sdd->cur_speed = speed;
@@ -929,7 +723,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 
 	spin_unlock_irqrestore(&sdd->lock, flags);
 
-	status = wait_for_xfer(sdd, xfer, use_dma);
+	if (use_dma)
+		status = wait_for_dma(sdd, xfer);
+	else
+		status = wait_for_pio(sdd, xfer);
 
 	if (status) {
 		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
@@ -941,10 +738,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 		if (use_dma) {
 			if (xfer->tx_buf != NULL
 			    && (sdd->state & TXBUSY))
-				s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
+				dmaengine_terminate_all(sdd->tx_dma.ch);
 			if (xfer->rx_buf != NULL
 			    && (sdd->state & RXBUSY))
-				s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
+				dmaengine_terminate_all(sdd->rx_dma.ch);
 		}
 	} else {
 		flush_fifo(sdd);
@@ -953,16 +750,6 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 	return status;
 }
 
-static int s3c64xx_spi_unprepare_message(struct spi_master *master,
-					 struct spi_message *msg)
-{
-	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
-
-	s3c64xx_spi_unmap_mssg(sdd, msg);
-
-	return 0;
-}
-
 static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
 				struct spi_device *spi)
 {
@@ -1092,14 +879,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
 
 	pm_runtime_put(&sdd->pdev->dev);
 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-	disable_cs(sdd, spi);
 	return 0;
 
 setup_exit:
 	pm_runtime_put(&sdd->pdev->dev);
 	/* setup() returns with device de-selected */
 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-	disable_cs(sdd, spi);
 
 	gpio_free(cs->line);
 	spi_set_ctldata(spi, NULL);
@@ -1338,7 +1123,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
 	master->prepare_message = s3c64xx_spi_prepare_message;
 	master->transfer_one = s3c64xx_spi_transfer_one;
-	master->unprepare_message = s3c64xx_spi_unprepare_message;
 	master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
 	master->num_chipselect = sci->num_cs;
 	master->dma_alignment = 8;
@@ -1347,6 +1131,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 	/* the spi->mode bits understood by this driver: */
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 	master->auto_runtime_pm = true;
+	if (!is_polling(sdd))
+		master->can_dma = s3c64xx_spi_can_dma;
 
 	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
 	if (IS_ERR(sdd->regs)) {
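Registering can_dma only when !is_polling(sdd) keeps S3C64XX_SPI_QUIRK_POLL ports on pure PIO: the core never sees the hook, never builds sg tables, and transfer_one() always lands in wait_for_pio(). The effective per-transfer policy after this patch, restated as a sketch (illustrative, not driver code):

    static bool sketch_uses_dma(struct s3c64xx_spi_driver_data *sdd,
                                struct spi_transfer *xfer)
    {
            if (is_polling(sdd))    /* S3C64XX_SPI_QUIRK_POLL port */
                    return false;   /* can_dma was never registered */

            /* otherwise: DMA only above half the FIFO depth */
            return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
    }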