Diffstat (limited to 'drivers/spi/spi-sh-msiof.c')
-rw-r--r--  drivers/spi/spi-sh-msiof.c  |  106
1 file changed, 63 insertions(+), 43 deletions(-)
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 2a4354dcd661..3f365402fcc0 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -636,17 +636,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
         dma_cookie_t cookie;
         int ret;
 
-        if (tx) {
-                ier_bits |= IER_TDREQE | IER_TDMAE;
-                dma_sync_single_for_device(p->master->dma_tx->device->dev,
-                                           p->tx_dma_addr, len, DMA_TO_DEVICE);
-                desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
-                                        p->tx_dma_addr, len, DMA_TO_DEVICE,
-                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-                if (!desc_tx)
-                        return -EAGAIN;
-        }
-
+        /* First prepare and submit the DMA request(s), as this may fail */
         if (rx) {
                 ier_bits |= IER_RDREQE | IER_RDMAE;
                 desc_rx = dmaengine_prep_slave_single(p->master->dma_rx,
@@ -654,30 +644,26 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                 if (!desc_rx)
                         return -EAGAIN;
-        }
-
-        /* 1 stage FIFO watermarks for DMA */
-        sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
 
-        /* setup msiof transfer mode registers (32-bit words) */
-        sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
-
-        sh_msiof_write(p, IER, ier_bits);
-
-        reinit_completion(&p->done);
-
-        if (rx) {
                 desc_rx->callback = sh_msiof_dma_complete;
                 desc_rx->callback_param = p;
                 cookie = dmaengine_submit(desc_rx);
-                if (dma_submit_error(cookie)) {
-                        ret = cookie;
-                        goto stop_ier;
-                }
-                dma_async_issue_pending(p->master->dma_rx);
+                if (dma_submit_error(cookie))
+                        return cookie;
         }
 
         if (tx) {
+                ier_bits |= IER_TDREQE | IER_TDMAE;
+                dma_sync_single_for_device(p->master->dma_tx->device->dev,
+                                           p->tx_dma_addr, len, DMA_TO_DEVICE);
+                desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
+                                        p->tx_dma_addr, len, DMA_TO_DEVICE,
+                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+                if (!desc_tx) {
+                        ret = -EAGAIN;
+                        goto no_dma_tx;
+                }
+
                 if (rx) {
                         /* No callback */
                         desc_tx->callback = NULL;
@@ -688,15 +674,30 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
                 cookie = dmaengine_submit(desc_tx);
                 if (dma_submit_error(cookie)) {
                         ret = cookie;
-                        goto stop_rx;
+                        goto no_dma_tx;
                 }
-                dma_async_issue_pending(p->master->dma_tx);
         }
 
+        /* 1 stage FIFO watermarks for DMA */
+        sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
+
+        /* setup msiof transfer mode registers (32-bit words) */
+        sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
+
+        sh_msiof_write(p, IER, ier_bits);
+
+        reinit_completion(&p->done);
+
+        /* Now start DMA */
+        if (rx)
+                dma_async_issue_pending(p->master->dma_rx);
+        if (tx)
+                dma_async_issue_pending(p->master->dma_tx);
+
         ret = sh_msiof_spi_start(p, rx);
         if (ret) {
                 dev_err(&p->pdev->dev, "failed to start hardware\n");
-                goto stop_tx;
+                goto stop_dma;
         }
 
         /* wait for tx fifo to be emptied / rx fifo to be filled */
@@ -726,13 +727,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 stop_reset:
         sh_msiof_reset_str(p);
         sh_msiof_spi_stop(p, rx);
-stop_tx:
+stop_dma:
         if (tx)
                 dmaengine_terminate_all(p->master->dma_tx);
-stop_rx:
+no_dma_tx:
         if (rx)
                 dmaengine_terminate_all(p->master->dma_rx);
-stop_ier:
         sh_msiof_write(p, IER, 0);
         return ret;
 }
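
Note: the rework above follows the usual dmaengine slave ordering, where descriptors are prepared and submitted first (both steps can fail), the controller registers are programmed next, and dma_async_issue_pending() is only called once nothing can fail any more. Below is a minimal sketch of that ordering for a single RX transfer; the helper name and parameters are illustrative only, not part of the driver.

#include <linux/dmaengine.h>

/*
 * Hypothetical helper: queue one RX slave transfer using the same
 * prepare -> submit -> issue_pending ordering as the rework above.
 */
static int example_queue_rx(struct dma_chan *chan, dma_addr_t buf, size_t len,
                            void (*done)(void *arg), void *arg)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        /* Step 1: prepare the descriptor; may return NULL on failure */
        desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EAGAIN;

        desc->callback = done;
        desc->callback_param = arg;

        /* Step 2: submit to the engine's queue; this may still fail */
        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return cookie;

        /*
         * Step 3: only after every submission succeeded (and the peripheral
         * has been programmed) actually kick the DMA engine.
         */
        dma_async_issue_pending(chan);
        return 0;
}
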
@@ -928,6 +928,9 @@ static const struct of_device_id sh_msiof_match[] = {
         { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
         { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
         { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
+        { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
+        { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
+        { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
         {},
 };
 MODULE_DEVICE_TABLE(of, sh_msiof_match);
@@ -972,20 +975,24 @@ static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
         dma_cap_zero(mask);
         dma_cap_set(DMA_SLAVE, mask);
 
-        chan = dma_request_channel(mask, shdma_chan_filter,
-                                   (void *)(unsigned long)id);
+        chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+                                        (void *)(unsigned long)id, dev,
+                                        dir == DMA_MEM_TO_DEV ? "tx" : "rx");
         if (!chan) {
-                dev_warn(dev, "dma_request_channel failed\n");
+                dev_warn(dev, "dma_request_slave_channel_compat failed\n");
                 return NULL;
         }
 
         memset(&cfg, 0, sizeof(cfg));
         cfg.slave_id = id;
         cfg.direction = dir;
-        if (dir == DMA_MEM_TO_DEV)
+        if (dir == DMA_MEM_TO_DEV) {
                 cfg.dst_addr = port_addr;
-        else
+                cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+        } else {
                 cfg.src_addr = port_addr;
+                cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+        }
 
         ret = dmaengine_slave_config(chan, &cfg);
         if (ret) {
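
Note: dma_request_slave_channel_compat() first tries the channel described by the device's DT "dmas"/"dma-names" properties and falls back to the legacy filter-based dma_request_channel() path. The sketch below shows that request/configure pattern together with the 32-bit slave bus width set in the hunk above; the helper name is hypothetical and kept separate from the driver code.

#include <linux/dmaengine.h>
#include <linux/sh_dma.h>
#include <linux/string.h>

/*
 * Hypothetical sketch: request a TX slave channel (DT first, legacy
 * filter as fallback) and configure it for 32-bit FIFO accesses.
 */
static struct dma_chan *example_request_tx_chan(struct device *dev,
                                                unsigned int id,
                                                dma_addr_t port_addr)
{
        struct dma_slave_config cfg;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
        int ret;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* DT-described "tx" channel if present, else shdma filter match */
        chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
                                                (void *)(unsigned long)id,
                                                dev, "tx");
        if (!chan)
                return NULL;

        memset(&cfg, 0, sizeof(cfg));
        cfg.direction = DMA_MEM_TO_DEV;
        cfg.dst_addr = port_addr;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;        /* 32-bit FIFO */

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret) {
                dma_release_channel(chan);
                return NULL;
        }

        return chan;
}
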
@@ -1002,12 +1009,22 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
         struct platform_device *pdev = p->pdev;
         struct device *dev = &pdev->dev;
         const struct sh_msiof_spi_info *info = dev_get_platdata(dev);
+        unsigned int dma_tx_id, dma_rx_id;
         const struct resource *res;
         struct spi_master *master;
         struct device *tx_dev, *rx_dev;
 
-        if (!info || !info->dma_tx_id || !info->dma_rx_id)
-                return 0;        /* The driver assumes no error */
+        if (dev->of_node) {
+                /* In the OF case we will get the slave IDs from the DT */
+                dma_tx_id = 0;
+                dma_rx_id = 0;
+        } else if (info && info->dma_tx_id && info->dma_rx_id) {
+                dma_tx_id = info->dma_tx_id;
+                dma_rx_id = info->dma_rx_id;
+        } else {
+                /* The driver assumes no error */
+                return 0;
+        }
 
         /* The DMA engine uses the second register set, if present */
         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1016,13 +1033,13 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
 
         master = p->master;
         master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
-                                                   info->dma_tx_id,
+                                                   dma_tx_id,
                                                    res->start + TFDR);
         if (!master->dma_tx)
                 return -ENODEV;
 
         master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
-                                                   info->dma_rx_id,
+                                                   dma_rx_id,
                                                    res->start + RFDR);
         if (!master->dma_rx)
                 goto free_tx_chan;
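
Note: with a DT node the slave IDs passed down to the filter stay zero, because the channels are resolved from the device tree; legacy platform data still supplies explicit shdma IDs. A hypothetical sketch of that selection (names and return convention are illustrative only):

#include <linux/device.h>
#include <linux/spi/sh_msiof.h>

/*
 * Illustrative only: decide where the DMA slave IDs come from.
 * Returns false when no DMA is described, so the caller can fall
 * back to PIO without treating it as an error.
 */
static bool example_pick_slave_ids(struct device *dev,
                                   const struct sh_msiof_spi_info *info,
                                   unsigned int *tx_id, unsigned int *rx_id)
{
        if (dev->of_node) {
                /* Channels come from "dmas"/"dma-names" in the DT */
                *tx_id = 0;
                *rx_id = 0;
        } else if (info && info->dma_tx_id && info->dma_rx_id) {
                /* Legacy board code passes explicit shdma slave IDs */
                *tx_id = info->dma_tx_id;
                *rx_id = info->dma_rx_id;
        } else {
                return false;
        }

        return true;
}
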
@@ -1205,6 +1222,9 @@ static struct platform_device_id spi_driver_ids[] = {
         { "spi_sh_msiof", (kernel_ulong_t)&sh_data },
         { "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data },
         { "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data },
+        { "spi_r8a7792_msiof", (kernel_ulong_t)&r8a779x_data },
+        { "spi_r8a7793_msiof", (kernel_ulong_t)&r8a779x_data },
+        { "spi_r8a7794_msiof", (kernel_ulong_t)&r8a779x_data },
         {},
 };
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);