author    | Alexandre Bounine <alexandre.bounine@idt.com> | 2012-05-31 19:26:39 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-31 20:49:31 -0400
commit    | 9eaa3d9bb2c4da99b1161cfcc63f3e77d9d3d156 (patch)
tree      | d3ad133b7742ed47db6750f524c4ee895790b2a2 /drivers/rapidio
parent    | e42d98ebe7d754a2c9fbccd6186721d3ca8679f6 (diff)
rapidio/tsi721: add DMA engine support
Adds support for the DMA Engine API to the Tsi721 mport driver.
Includes the following changes for the Tsi721 driver:
- Modifies BDMA register offset definitions to support per-channel handling
- Separates BDMA channel reserved for RIO Maintenance requests
- Adds DMA Engine callback routines
Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Li Yang <leoli@freescale.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
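The first bullet of the commit message shows up throughout the register definitions in tsi721.h: each per-register, per-channel macro is replaced by a per-channel block base plus fixed offsets within the block. A minimal sketch of the before/after scheme, using the macro values visible in the tsi721.h hunk of this patch (the *_OLD name exists only in this illustration):

```c
/* Old scheme: one absolute address macro per register, parameterized by channel */
#define TSI721_DMAC_DWRCNT_OLD(x)  (0x51000 + (x) * 0x1000)

/* New scheme: a per-channel block base plus fixed register offsets inside it */
#define TSI721_DMAC_BASE(x)        (0x51000 + (x) * 0x1000)
#define TSI721_DMAC_DWRCNT         0x000

/*
 * TSI721_DMAC_DWRCNT_OLD(ch) equals TSI721_DMAC_BASE(ch) + TSI721_DMAC_DWRCNT,
 * but the new form lets each channel cache a single register-block pointer
 * (regs = priv->regs + TSI721_DMAC_BASE(ch)) and reuse the plain offsets, as
 * tsi721_maint_dma() and tsi721_dma.c do below.
 */
```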
Diffstat (limited to 'drivers/rapidio')
-rw-r--r-- | drivers/rapidio/devices/Makefile     |   3
-rw-r--r-- | drivers/rapidio/devices/tsi721.c     | 211
-rw-r--r-- | drivers/rapidio/devices/tsi721.h     | 105
-rw-r--r-- | drivers/rapidio/devices/tsi721_dma.c | 823
4 files changed, 1050 insertions, 92 deletions
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 3b7b4e2dff7c..7b62860f34f8 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -3,3 +3,6 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o | 5 | obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o |
6 | ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y) | ||
7 | obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o | ||
8 | endif | ||
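The second bullet of the commit message is a data-structure split that the tsi721.c and tsi721.h diffs below rely on: maintenance read/write requests move out of the shared bdma[] array into a dedicated priv->mdma block, leaving bdma[] free to be exposed through the DMA Engine API. A condensed subset of the two structures added in the tsi721.h hunk (see the header diff below for the full field lists):

```c
#include <linux/types.h>
#include <linux/dmaengine.h>

/* Maintenance channel: a private two-descriptor ring, polled synchronously */
struct tsi721_bdma_maint {
	int		ch_id;		/* BDMA channel number (TSI721_DMACH_MAINT) */
	int		bd_num;		/* number of buffer descriptors */
	void		*bd_base;	/* start of DMA descriptors */
	dma_addr_t	bd_phys;
};

/* Data channel: built only with CONFIG_RAPIDIO_DMA_ENGINE, wraps a dma_chan */
struct tsi721_bdma_chan {
	int		id;
	void __iomem	*regs;		/* priv->regs + TSI721_DMAC_BASE(id) */
	struct dma_chan	dchan;		/* handle exported to the DMA engine core */
	/* descriptor ring, status FIFO, lists and tasklet omitted in this sketch */
};
```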
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 30d2072f480b..722246cf20ab 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -108,6 +108,7 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, | |||
108 | u16 destid, u8 hopcount, u32 offset, int len, | 108 | u16 destid, u8 hopcount, u32 offset, int len, |
109 | u32 *data, int do_wr) | 109 | u32 *data, int do_wr) |
110 | { | 110 | { |
111 | void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); | ||
111 | struct tsi721_dma_desc *bd_ptr; | 112 | struct tsi721_dma_desc *bd_ptr; |
112 | u32 rd_count, swr_ptr, ch_stat; | 113 | u32 rd_count, swr_ptr, ch_stat; |
113 | int i, err = 0; | 114 | int i, err = 0; |
@@ -116,10 +117,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, | |||
116 | if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) | 117 | if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) |
117 | return -EINVAL; | 118 | return -EINVAL; |
118 | 119 | ||
119 | bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base; | 120 | bd_ptr = priv->mdma.bd_base; |
120 | 121 | ||
121 | rd_count = ioread32( | 122 | rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); |
122 | priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT)); | ||
123 | 123 | ||
124 | /* Initialize DMA descriptor */ | 124 | /* Initialize DMA descriptor */ |
125 | bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid); | 125 | bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid); |
@@ -134,19 +134,18 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, | |||
134 | mb(); | 134 | mb(); |
135 | 135 | ||
136 | /* Start DMA operation */ | 136 | /* Start DMA operation */ |
137 | iowrite32(rd_count + 2, | 137 | iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT); |
138 | priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT)); | 138 | ioread32(regs + TSI721_DMAC_DWRCNT); |
139 | ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT)); | ||
140 | i = 0; | 139 | i = 0; |
141 | 140 | ||
142 | /* Wait until DMA transfer is finished */ | 141 | /* Wait until DMA transfer is finished */ |
143 | while ((ch_stat = ioread32(priv->regs + | 142 | while ((ch_stat = ioread32(regs + TSI721_DMAC_STS)) |
144 | TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) { | 143 | & TSI721_DMAC_STS_RUN) { |
145 | udelay(1); | 144 | udelay(1); |
146 | if (++i >= 5000000) { | 145 | if (++i >= 5000000) { |
147 | dev_dbg(&priv->pdev->dev, | 146 | dev_dbg(&priv->pdev->dev, |
148 | "%s : DMA[%d] read timeout ch_status=%x\n", | 147 | "%s : DMA[%d] read timeout ch_status=%x\n", |
149 | __func__, TSI721_DMACH_MAINT, ch_stat); | 148 | __func__, priv->mdma.ch_id, ch_stat); |
150 | if (!do_wr) | 149 | if (!do_wr) |
151 | *data = 0xffffffff; | 150 | *data = 0xffffffff; |
152 | err = -EIO; | 151 | err = -EIO; |
@@ -162,13 +161,10 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, | |||
162 | __func__, ch_stat); | 161 | __func__, ch_stat); |
163 | dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n", | 162 | dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n", |
164 | do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset); | 163 | do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset); |
165 | iowrite32(TSI721_DMAC_INT_ALL, | 164 | iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); |
166 | priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT)); | 165 | iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); |
167 | iowrite32(TSI721_DMAC_CTL_INIT, | ||
168 | priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT)); | ||
169 | udelay(10); | 166 | udelay(10); |
170 | iowrite32(0, priv->regs + | 167 | iowrite32(0, regs + TSI721_DMAC_DWRCNT); |
171 | TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT)); | ||
172 | udelay(1); | 168 | udelay(1); |
173 | if (!do_wr) | 169 | if (!do_wr) |
174 | *data = 0xffffffff; | 170 | *data = 0xffffffff; |
@@ -184,8 +180,8 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, | |||
184 | * NOTE: Skipping check and clear FIFO entries because we are waiting | 180 | * NOTE: Skipping check and clear FIFO entries because we are waiting |
185 | * for transfer to be completed. | 181 | * for transfer to be completed. |
186 | */ | 182 | */ |
187 | swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT)); | 183 | swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); |
188 | iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT)); | 184 | iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); |
189 | err_out: | 185 | err_out: |
190 | 186 | ||
191 | return err; | 187 | return err; |
@@ -541,6 +537,22 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) | |||
541 | tsi721_pw_handler(mport); | 537 | tsi721_pw_handler(mport); |
542 | } | 538 | } |
543 | 539 | ||
540 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
541 | if (dev_int & TSI721_DEV_INT_BDMA_CH) { | ||
542 | int ch; | ||
543 | |||
544 | if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) { | ||
545 | dev_dbg(&priv->pdev->dev, | ||
546 | "IRQ from DMA channel 0x%08x\n", dev_ch_int); | ||
547 | |||
548 | for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) { | ||
549 | if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch))) | ||
550 | continue; | ||
551 | tsi721_bdma_handler(&priv->bdma[ch]); | ||
552 | } | ||
553 | } | ||
554 | } | ||
555 | #endif | ||
544 | return IRQ_HANDLED; | 556 | return IRQ_HANDLED; |
545 | } | 557 | } |
546 | 558 | ||
@@ -553,18 +565,26 @@ static void tsi721_interrupts_init(struct tsi721_device *priv) | |||
553 | priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); | 565 | priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); |
554 | iowrite32(TSI721_SR_CHINT_IDBQRCV, | 566 | iowrite32(TSI721_SR_CHINT_IDBQRCV, |
555 | priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); | 567 | priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); |
556 | iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE), | ||
557 | priv->regs + TSI721_DEV_CHAN_INTE); | ||
558 | 568 | ||
559 | /* Enable SRIO MAC interrupts */ | 569 | /* Enable SRIO MAC interrupts */ |
560 | iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT, | 570 | iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT, |
561 | priv->regs + TSI721_RIO_EM_DEV_INT_EN); | 571 | priv->regs + TSI721_RIO_EM_DEV_INT_EN); |
562 | 572 | ||
573 | /* Enable interrupts from channels in use */ | ||
574 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
575 | intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) | | ||
576 | (TSI721_INT_BDMA_CHAN_M & | ||
577 | ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT)); | ||
578 | #else | ||
579 | intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE); | ||
580 | #endif | ||
581 | iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE); | ||
582 | |||
563 | if (priv->flags & TSI721_USING_MSIX) | 583 | if (priv->flags & TSI721_USING_MSIX) |
564 | intr = TSI721_DEV_INT_SRIO; | 584 | intr = TSI721_DEV_INT_SRIO; |
565 | else | 585 | else |
566 | intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | | 586 | intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | |
567 | TSI721_DEV_INT_SMSG_CH; | 587 | TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; |
568 | 588 | ||
569 | iowrite32(intr, priv->regs + TSI721_DEV_INTE); | 589 | iowrite32(intr, priv->regs + TSI721_DEV_INTE); |
570 | ioread32(priv->regs + TSI721_DEV_INTE); | 590 | ioread32(priv->regs + TSI721_DEV_INTE); |
@@ -715,12 +735,29 @@ static int tsi721_enable_msix(struct tsi721_device *priv) | |||
715 | TSI721_MSIX_OMSG_INT(i); | 735 | TSI721_MSIX_OMSG_INT(i); |
716 | } | 736 | } |
717 | 737 | ||
738 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
739 | /* | ||
740 | * Initialize MSI-X entries for Block DMA Engine: | ||
741 | * this driver supports XXX DMA channels | ||
742 | * (one is reserved for SRIO maintenance transactions) | ||
743 | */ | ||
744 | for (i = 0; i < TSI721_DMA_CHNUM; i++) { | ||
745 | entries[TSI721_VECT_DMA0_DONE + i].entry = | ||
746 | TSI721_MSIX_DMACH_DONE(i); | ||
747 | entries[TSI721_VECT_DMA0_INT + i].entry = | ||
748 | TSI721_MSIX_DMACH_INT(i); | ||
749 | } | ||
750 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | ||
751 | |||
718 | err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries)); | 752 | err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries)); |
719 | if (err) { | 753 | if (err) { |
720 | if (err > 0) | 754 | if (err > 0) |
721 | dev_info(&priv->pdev->dev, | 755 | dev_info(&priv->pdev->dev, |
722 | "Only %d MSI-X vectors available, " | 756 | "Only %d MSI-X vectors available, " |
723 | "not using MSI-X\n", err); | 757 | "not using MSI-X\n", err); |
758 | else | ||
759 | dev_err(&priv->pdev->dev, | ||
760 | "Failed to enable MSI-X (err=%d)\n", err); | ||
724 | return err; | 761 | return err; |
725 | } | 762 | } |
726 | 763 | ||
@@ -760,6 +797,22 @@ static int tsi721_enable_msix(struct tsi721_device *priv) | |||
760 | i, pci_name(priv->pdev)); | 797 | i, pci_name(priv->pdev)); |
761 | } | 798 | } |
762 | 799 | ||
800 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
801 | for (i = 0; i < TSI721_DMA_CHNUM; i++) { | ||
802 | priv->msix[TSI721_VECT_DMA0_DONE + i].vector = | ||
803 | entries[TSI721_VECT_DMA0_DONE + i].vector; | ||
804 | snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name, | ||
805 | IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s", | ||
806 | i, pci_name(priv->pdev)); | ||
807 | |||
808 | priv->msix[TSI721_VECT_DMA0_INT + i].vector = | ||
809 | entries[TSI721_VECT_DMA0_INT + i].vector; | ||
810 | snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name, | ||
811 | IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s", | ||
812 | i, pci_name(priv->pdev)); | ||
813 | } | ||
814 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | ||
815 | |||
763 | return 0; | 816 | return 0; |
764 | } | 817 | } |
765 | #endif /* CONFIG_PCI_MSI */ | 818 | #endif /* CONFIG_PCI_MSI */ |
@@ -888,20 +941,34 @@ static void tsi721_doorbell_free(struct tsi721_device *priv) | |||
888 | priv->idb_base = NULL; | 941 | priv->idb_base = NULL; |
889 | } | 942 | } |
890 | 943 | ||
891 | static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) | 944 | /** |
945 | * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel. | ||
946 | * @priv: pointer to tsi721 private data | ||
947 | * | ||
948 | * Initialize BDMA channel allocated for RapidIO maintenance read/write | ||
949 | * request generation | ||
950 | * Returns %0 on success or %-ENOMEM on failure. | ||
951 | */ | ||
952 | static int tsi721_bdma_maint_init(struct tsi721_device *priv) | ||
892 | { | 953 | { |
893 | struct tsi721_dma_desc *bd_ptr; | 954 | struct tsi721_dma_desc *bd_ptr; |
894 | u64 *sts_ptr; | 955 | u64 *sts_ptr; |
895 | dma_addr_t bd_phys, sts_phys; | 956 | dma_addr_t bd_phys, sts_phys; |
896 | int sts_size; | 957 | int sts_size; |
897 | int bd_num = priv->bdma[chnum].bd_num; | 958 | int bd_num = 2; |
959 | void __iomem *regs; | ||
898 | 960 | ||
899 | dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum); | 961 | dev_dbg(&priv->pdev->dev, |
962 | "Init Block DMA Engine for Maintenance requests, CH%d\n", | ||
963 | TSI721_DMACH_MAINT); | ||
900 | 964 | ||
901 | /* | 965 | /* |
902 | * Initialize DMA channel for maintenance requests | 966 | * Initialize DMA channel for maintenance requests |
903 | */ | 967 | */ |
904 | 968 | ||
969 | priv->mdma.ch_id = TSI721_DMACH_MAINT; | ||
970 | regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); | ||
971 | |||
905 | /* Allocate space for DMA descriptors */ | 972 | /* Allocate space for DMA descriptors */ |
906 | bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, | 973 | bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, |
907 | bd_num * sizeof(struct tsi721_dma_desc), | 974 | bd_num * sizeof(struct tsi721_dma_desc), |
@@ -909,8 +976,9 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) | |||
909 | if (!bd_ptr) | 976 | if (!bd_ptr) |
910 | return -ENOMEM; | 977 | return -ENOMEM; |
911 | 978 | ||
912 | priv->bdma[chnum].bd_phys = bd_phys; | 979 | priv->mdma.bd_num = bd_num; |
913 | priv->bdma[chnum].bd_base = bd_ptr; | 980 | priv->mdma.bd_phys = bd_phys; |
981 | priv->mdma.bd_base = bd_ptr; | ||
914 | 982 | ||
915 | dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", | 983 | dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", |
916 | bd_ptr, (unsigned long long)bd_phys); | 984 | bd_ptr, (unsigned long long)bd_phys); |
@@ -927,13 +995,13 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) | |||
927 | dma_free_coherent(&priv->pdev->dev, | 995 | dma_free_coherent(&priv->pdev->dev, |
928 | bd_num * sizeof(struct tsi721_dma_desc), | 996 | bd_num * sizeof(struct tsi721_dma_desc), |
929 | bd_ptr, bd_phys); | 997 | bd_ptr, bd_phys); |
930 | priv->bdma[chnum].bd_base = NULL; | 998 | priv->mdma.bd_base = NULL; |
931 | return -ENOMEM; | 999 | return -ENOMEM; |
932 | } | 1000 | } |
933 | 1001 | ||
934 | priv->bdma[chnum].sts_phys = sts_phys; | 1002 | priv->mdma.sts_phys = sts_phys; |
935 | priv->bdma[chnum].sts_base = sts_ptr; | 1003 | priv->mdma.sts_base = sts_ptr; |
936 | priv->bdma[chnum].sts_size = sts_size; | 1004 | priv->mdma.sts_size = sts_size; |
937 | 1005 | ||
938 | dev_dbg(&priv->pdev->dev, | 1006 | dev_dbg(&priv->pdev->dev, |
939 | "desc status FIFO @ %p (phys = %llx) size=0x%x\n", | 1007 | "desc status FIFO @ %p (phys = %llx) size=0x%x\n", |
@@ -946,83 +1014,61 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) | |||
946 | bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); | 1014 | bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); |
947 | 1015 | ||
948 | /* Setup DMA descriptor pointers */ | 1016 | /* Setup DMA descriptor pointers */ |
949 | iowrite32(((u64)bd_phys >> 32), | 1017 | iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH); |
950 | priv->regs + TSI721_DMAC_DPTRH(chnum)); | ||
951 | iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), | 1018 | iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), |
952 | priv->regs + TSI721_DMAC_DPTRL(chnum)); | 1019 | regs + TSI721_DMAC_DPTRL); |
953 | 1020 | ||
954 | /* Setup descriptor status FIFO */ | 1021 | /* Setup descriptor status FIFO */ |
955 | iowrite32(((u64)sts_phys >> 32), | 1022 | iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH); |
956 | priv->regs + TSI721_DMAC_DSBH(chnum)); | ||
957 | iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), | 1023 | iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), |
958 | priv->regs + TSI721_DMAC_DSBL(chnum)); | 1024 | regs + TSI721_DMAC_DSBL); |
959 | iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), | 1025 | iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), |
960 | priv->regs + TSI721_DMAC_DSSZ(chnum)); | 1026 | regs + TSI721_DMAC_DSSZ); |
961 | 1027 | ||
962 | /* Clear interrupt bits */ | 1028 | /* Clear interrupt bits */ |
963 | iowrite32(TSI721_DMAC_INT_ALL, | 1029 | iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); |
964 | priv->regs + TSI721_DMAC_INT(chnum)); | ||
965 | 1030 | ||
966 | ioread32(priv->regs + TSI721_DMAC_INT(chnum)); | 1031 | ioread32(regs + TSI721_DMAC_INT); |
967 | 1032 | ||
968 | /* Toggle DMA channel initialization */ | 1033 | /* Toggle DMA channel initialization */ |
969 | iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum)); | 1034 | iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); |
970 | ioread32(priv->regs + TSI721_DMAC_CTL(chnum)); | 1035 | ioread32(regs + TSI721_DMAC_CTL); |
971 | udelay(10); | 1036 | udelay(10); |
972 | 1037 | ||
973 | return 0; | 1038 | return 0; |
974 | } | 1039 | } |
975 | 1040 | ||
976 | static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum) | 1041 | static int tsi721_bdma_maint_free(struct tsi721_device *priv) |
977 | { | 1042 | { |
978 | u32 ch_stat; | 1043 | u32 ch_stat; |
1044 | struct tsi721_bdma_maint *mdma = &priv->mdma; | ||
1045 | void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id); | ||
979 | 1046 | ||
980 | if (priv->bdma[chnum].bd_base == NULL) | 1047 | if (mdma->bd_base == NULL) |
981 | return 0; | 1048 | return 0; |
982 | 1049 | ||
983 | /* Check if DMA channel still running */ | 1050 | /* Check if DMA channel still running */ |
984 | ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum)); | 1051 | ch_stat = ioread32(regs + TSI721_DMAC_STS); |
985 | if (ch_stat & TSI721_DMAC_STS_RUN) | 1052 | if (ch_stat & TSI721_DMAC_STS_RUN) |
986 | return -EFAULT; | 1053 | return -EFAULT; |
987 | 1054 | ||
988 | /* Put DMA channel into init state */ | 1055 | /* Put DMA channel into init state */ |
989 | iowrite32(TSI721_DMAC_CTL_INIT, | 1056 | iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); |
990 | priv->regs + TSI721_DMAC_CTL(chnum)); | ||
991 | 1057 | ||
992 | /* Free space allocated for DMA descriptors */ | 1058 | /* Free space allocated for DMA descriptors */ |
993 | dma_free_coherent(&priv->pdev->dev, | 1059 | dma_free_coherent(&priv->pdev->dev, |
994 | priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc), | 1060 | mdma->bd_num * sizeof(struct tsi721_dma_desc), |
995 | priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys); | 1061 | mdma->bd_base, mdma->bd_phys); |
996 | priv->bdma[chnum].bd_base = NULL; | 1062 | mdma->bd_base = NULL; |
997 | 1063 | ||
998 | /* Free space allocated for status FIFO */ | 1064 | /* Free space allocated for status FIFO */ |
999 | dma_free_coherent(&priv->pdev->dev, | 1065 | dma_free_coherent(&priv->pdev->dev, |
1000 | priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts), | 1066 | mdma->sts_size * sizeof(struct tsi721_dma_sts), |
1001 | priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys); | 1067 | mdma->sts_base, mdma->sts_phys); |
1002 | priv->bdma[chnum].sts_base = NULL; | 1068 | mdma->sts_base = NULL; |
1003 | return 0; | ||
1004 | } | ||
1005 | |||
1006 | static int tsi721_bdma_init(struct tsi721_device *priv) | ||
1007 | { | ||
1008 | /* Initialize BDMA channel allocated for RapidIO maintenance read/write | ||
1009 | * request generation | ||
1010 | */ | ||
1011 | priv->bdma[TSI721_DMACH_MAINT].bd_num = 2; | ||
1012 | if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) { | ||
1013 | dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA" | ||
1014 | " channel %d, aborting\n", TSI721_DMACH_MAINT); | ||
1015 | return -ENOMEM; | ||
1016 | } | ||
1017 | |||
1018 | return 0; | 1069 | return 0; |
1019 | } | 1070 | } |
1020 | 1071 | ||
1021 | static void tsi721_bdma_free(struct tsi721_device *priv) | ||
1022 | { | ||
1023 | tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT); | ||
1024 | } | ||
1025 | |||
1026 | /* Enable Inbound Messaging Interrupts */ | 1072 | /* Enable Inbound Messaging Interrupts */ |
1027 | static void | 1073 | static void |
1028 | tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch, | 1074 | tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch, |
@@ -2035,7 +2081,8 @@ static void tsi721_disable_ints(struct tsi721_device *priv) | |||
2035 | 2081 | ||
2036 | /* Disable all BDMA Channel interrupts */ | 2082 | /* Disable all BDMA Channel interrupts */ |
2037 | for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) | 2083 | for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) |
2038 | iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch)); | 2084 | iowrite32(0, |
2085 | priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE); | ||
2039 | 2086 | ||
2040 | /* Disable all general BDMA interrupts */ | 2087 | /* Disable all general BDMA interrupts */ |
2041 | iowrite32(0, priv->regs + TSI721_BDMA_INTE); | 2088 | iowrite32(0, priv->regs + TSI721_BDMA_INTE); |
@@ -2104,6 +2151,7 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv) | |||
2104 | mport->phy_type = RIO_PHY_SERIAL; | 2151 | mport->phy_type = RIO_PHY_SERIAL; |
2105 | mport->priv = (void *)priv; | 2152 | mport->priv = (void *)priv; |
2106 | mport->phys_efptr = 0x100; | 2153 | mport->phys_efptr = 0x100; |
2154 | priv->mport = mport; | ||
2107 | 2155 | ||
2108 | INIT_LIST_HEAD(&mport->dbells); | 2156 | INIT_LIST_HEAD(&mport->dbells); |
2109 | 2157 | ||
@@ -2129,17 +2177,21 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv) | |||
2129 | if (!err) { | 2177 | if (!err) { |
2130 | tsi721_interrupts_init(priv); | 2178 | tsi721_interrupts_init(priv); |
2131 | ops->pwenable = tsi721_pw_enable; | 2179 | ops->pwenable = tsi721_pw_enable; |
2132 | } else | 2180 | } else { |
2133 | dev_err(&pdev->dev, "Unable to get assigned PCI IRQ " | 2181 | dev_err(&pdev->dev, "Unable to get assigned PCI IRQ " |
2134 | "vector %02X err=0x%x\n", pdev->irq, err); | 2182 | "vector %02X err=0x%x\n", pdev->irq, err); |
2183 | goto err_exit; | ||
2184 | } | ||
2135 | 2185 | ||
2186 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
2187 | tsi721_register_dma(priv); | ||
2188 | #endif | ||
2136 | /* Enable SRIO link */ | 2189 | /* Enable SRIO link */ |
2137 | iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | | 2190 | iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | |
2138 | TSI721_DEVCTL_SRBOOT_CMPL, | 2191 | TSI721_DEVCTL_SRBOOT_CMPL, |
2139 | priv->regs + TSI721_DEVCTL); | 2192 | priv->regs + TSI721_DEVCTL); |
2140 | 2193 | ||
2141 | rio_register_mport(mport); | 2194 | rio_register_mport(mport); |
2142 | priv->mport = mport; | ||
2143 | 2195 | ||
2144 | if (mport->host_deviceid >= 0) | 2196 | if (mport->host_deviceid >= 0) |
2145 | iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | | 2197 | iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | |
@@ -2149,6 +2201,11 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv) | |||
2149 | iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); | 2201 | iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); |
2150 | 2202 | ||
2151 | return 0; | 2203 | return 0; |
2204 | |||
2205 | err_exit: | ||
2206 | kfree(mport); | ||
2207 | kfree(ops); | ||
2208 | return err; | ||
2152 | } | 2209 | } |
2153 | 2210 | ||
2154 | static int __devinit tsi721_probe(struct pci_dev *pdev, | 2211 | static int __devinit tsi721_probe(struct pci_dev *pdev, |
@@ -2294,7 +2351,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, | |||
2294 | tsi721_init_pc2sr_mapping(priv); | 2351 | tsi721_init_pc2sr_mapping(priv); |
2295 | tsi721_init_sr2pc_mapping(priv); | 2352 | tsi721_init_sr2pc_mapping(priv); |
2296 | 2353 | ||
2297 | if (tsi721_bdma_init(priv)) { | 2354 | if (tsi721_bdma_maint_init(priv)) { |
2298 | dev_err(&pdev->dev, "BDMA initialization failed, aborting\n"); | 2355 | dev_err(&pdev->dev, "BDMA initialization failed, aborting\n"); |
2299 | err = -ENOMEM; | 2356 | err = -ENOMEM; |
2300 | goto err_unmap_bars; | 2357 | goto err_unmap_bars; |
@@ -2319,7 +2376,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, | |||
2319 | err_free_consistent: | 2376 | err_free_consistent: |
2320 | tsi721_doorbell_free(priv); | 2377 | tsi721_doorbell_free(priv); |
2321 | err_free_bdma: | 2378 | err_free_bdma: |
2322 | tsi721_bdma_free(priv); | 2379 | tsi721_bdma_maint_free(priv); |
2323 | err_unmap_bars: | 2380 | err_unmap_bars: |
2324 | if (priv->regs) | 2381 | if (priv->regs) |
2325 | iounmap(priv->regs); | 2382 | iounmap(priv->regs); |
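The interrupt-handler hunk above demultiplexes one bit per BDMA channel out of the device channel-interrupt status and calls tsi721_bdma_handler() for each pending channel. A standalone sketch of that demux shape, using the mask values from the tsi721.h hunk that follows (the function names and the plain-C harness here are illustrative only, not part of the driver):

```c
#include <stdio.h>

#define INT_BDMA_CHAN_M   0x000000ffu      /* mirrors TSI721_INT_BDMA_CHAN_M */
#define INT_BDMA_CHAN(x)  (1u << (x))      /* mirrors TSI721_INT_BDMA_CHAN(x) */
#define DMA_MAXCH         8                /* Tsi721 BDMA channels 0..7 */

/* Walk the per-channel interrupt bits and dispatch a handler for each set
 * bit -- the same loop structure added to tsi721_irqhandler() above. */
static void demux_bdma_irq(unsigned int dev_ch_int, void (*handler)(int ch))
{
	int ch;

	if (!(dev_ch_int & INT_BDMA_CHAN_M))
		return;
	for (ch = 0; ch < DMA_MAXCH; ch++)
		if (dev_ch_int & INT_BDMA_CHAN(ch))
			handler(ch);
}

static void report(int ch)
{
	printf("servicing BDMA channel %d\n", ch);
}

int main(void)
{
	demux_bdma_irq(0x00000005u, report);	/* channels 0 and 2 pending */
	return 0;
}
```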
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 1c226b31af13..59de9d7be346 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -167,6 +167,8 @@ | |||
167 | #define TSI721_DEV_INTE 0x29840 | 167 | #define TSI721_DEV_INTE 0x29840 |
168 | #define TSI721_DEV_INT 0x29844 | 168 | #define TSI721_DEV_INT 0x29844 |
169 | #define TSI721_DEV_INTSET 0x29848 | 169 | #define TSI721_DEV_INTSET 0x29848 |
170 | #define TSI721_DEV_INT_BDMA_CH 0x00002000 | ||
171 | #define TSI721_DEV_INT_BDMA_NCH 0x00001000 | ||
170 | #define TSI721_DEV_INT_SMSG_CH 0x00000800 | 172 | #define TSI721_DEV_INT_SMSG_CH 0x00000800 |
171 | #define TSI721_DEV_INT_SMSG_NCH 0x00000400 | 173 | #define TSI721_DEV_INT_SMSG_NCH 0x00000400 |
172 | #define TSI721_DEV_INT_SR2PC_CH 0x00000200 | 174 | #define TSI721_DEV_INT_SR2PC_CH 0x00000200 |
@@ -181,6 +183,8 @@ | |||
181 | #define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x))) | 183 | #define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x))) |
182 | #define TSI721_INT_OMSG_CHAN_M 0x0000ff00 | 184 | #define TSI721_INT_OMSG_CHAN_M 0x0000ff00 |
183 | #define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x))) | 185 | #define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x))) |
186 | #define TSI721_INT_BDMA_CHAN_M 0x000000ff | ||
187 | #define TSI721_INT_BDMA_CHAN(x) (1 << (x)) | ||
184 | 188 | ||
185 | /* | 189 | /* |
186 | * PC2SR block registers | 190 | * PC2SR block registers |
@@ -235,14 +239,16 @@ | |||
235 | * x = 0..7 | 239 | * x = 0..7 |
236 | */ | 240 | */ |
237 | 241 | ||
238 | #define TSI721_DMAC_DWRCNT(x) (0x51000 + (x) * 0x1000) | 242 | #define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000) |
239 | #define TSI721_DMAC_DRDCNT(x) (0x51004 + (x) * 0x1000) | ||
240 | 243 | ||
241 | #define TSI721_DMAC_CTL(x) (0x51008 + (x) * 0x1000) | 244 | #define TSI721_DMAC_DWRCNT 0x000 |
245 | #define TSI721_DMAC_DRDCNT 0x004 | ||
246 | |||
247 | #define TSI721_DMAC_CTL 0x008 | ||
242 | #define TSI721_DMAC_CTL_SUSP 0x00000002 | 248 | #define TSI721_DMAC_CTL_SUSP 0x00000002 |
243 | #define TSI721_DMAC_CTL_INIT 0x00000001 | 249 | #define TSI721_DMAC_CTL_INIT 0x00000001 |
244 | 250 | ||
245 | #define TSI721_DMAC_INT(x) (0x5100c + (x) * 0x1000) | 251 | #define TSI721_DMAC_INT 0x00c |
246 | #define TSI721_DMAC_INT_STFULL 0x00000010 | 252 | #define TSI721_DMAC_INT_STFULL 0x00000010 |
247 | #define TSI721_DMAC_INT_DONE 0x00000008 | 253 | #define TSI721_DMAC_INT_DONE 0x00000008 |
248 | #define TSI721_DMAC_INT_SUSP 0x00000004 | 254 | #define TSI721_DMAC_INT_SUSP 0x00000004 |
@@ -250,34 +256,33 @@ | |||
250 | #define TSI721_DMAC_INT_IOFDONE 0x00000001 | 256 | #define TSI721_DMAC_INT_IOFDONE 0x00000001 |
251 | #define TSI721_DMAC_INT_ALL 0x0000001f | 257 | #define TSI721_DMAC_INT_ALL 0x0000001f |
252 | 258 | ||
253 | #define TSI721_DMAC_INTSET(x) (0x51010 + (x) * 0x1000) | 259 | #define TSI721_DMAC_INTSET 0x010 |
254 | 260 | ||
255 | #define TSI721_DMAC_STS(x) (0x51014 + (x) * 0x1000) | 261 | #define TSI721_DMAC_STS 0x014 |
256 | #define TSI721_DMAC_STS_ABORT 0x00400000 | 262 | #define TSI721_DMAC_STS_ABORT 0x00400000 |
257 | #define TSI721_DMAC_STS_RUN 0x00200000 | 263 | #define TSI721_DMAC_STS_RUN 0x00200000 |
258 | #define TSI721_DMAC_STS_CS 0x001f0000 | 264 | #define TSI721_DMAC_STS_CS 0x001f0000 |
259 | 265 | ||
260 | #define TSI721_DMAC_INTE(x) (0x51018 + (x) * 0x1000) | 266 | #define TSI721_DMAC_INTE 0x018 |
261 | 267 | ||
262 | #define TSI721_DMAC_DPTRL(x) (0x51024 + (x) * 0x1000) | 268 | #define TSI721_DMAC_DPTRL 0x024 |
263 | #define TSI721_DMAC_DPTRL_MASK 0xffffffe0 | 269 | #define TSI721_DMAC_DPTRL_MASK 0xffffffe0 |
264 | 270 | ||
265 | #define TSI721_DMAC_DPTRH(x) (0x51028 + (x) * 0x1000) | 271 | #define TSI721_DMAC_DPTRH 0x028 |
266 | 272 | ||
267 | #define TSI721_DMAC_DSBL(x) (0x5102c + (x) * 0x1000) | 273 | #define TSI721_DMAC_DSBL 0x02c |
268 | #define TSI721_DMAC_DSBL_MASK 0xffffffc0 | 274 | #define TSI721_DMAC_DSBL_MASK 0xffffffc0 |
269 | 275 | ||
270 | #define TSI721_DMAC_DSBH(x) (0x51030 + (x) * 0x1000) | 276 | #define TSI721_DMAC_DSBH 0x030 |
271 | 277 | ||
272 | #define TSI721_DMAC_DSSZ(x) (0x51034 + (x) * 0x1000) | 278 | #define TSI721_DMAC_DSSZ 0x034 |
273 | #define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f | 279 | #define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f |
274 | #define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4) | 280 | #define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4) |
275 | 281 | ||
276 | 282 | #define TSI721_DMAC_DSRP 0x038 | |
277 | #define TSI721_DMAC_DSRP(x) (0x51038 + (x) * 0x1000) | ||
278 | #define TSI721_DMAC_DSRP_MASK 0x0007ffff | 283 | #define TSI721_DMAC_DSRP_MASK 0x0007ffff |
279 | 284 | ||
280 | #define TSI721_DMAC_DSWP(x) (0x5103c + (x) * 0x1000) | 285 | #define TSI721_DMAC_DSWP 0x03c |
281 | #define TSI721_DMAC_DSWP_MASK 0x0007ffff | 286 | #define TSI721_DMAC_DSWP_MASK 0x0007ffff |
282 | 287 | ||
283 | #define TSI721_BDMA_INTE 0x5f000 | 288 | #define TSI721_BDMA_INTE 0x5f000 |
@@ -612,6 +617,8 @@ enum dma_rtype { | |||
612 | #define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */ | 617 | #define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */ |
613 | #define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */ | 618 | #define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */ |
614 | 619 | ||
620 | #define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */ | ||
621 | |||
615 | #define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0) | 622 | #define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0) |
616 | 623 | ||
617 | enum tsi721_smsg_int_flag { | 624 | enum tsi721_smsg_int_flag { |
@@ -626,7 +633,48 @@ enum tsi721_smsg_int_flag { | |||
626 | 633 | ||
627 | /* Structures */ | 634 | /* Structures */ |
628 | 635 | ||
636 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
637 | |||
638 | struct tsi721_tx_desc { | ||
639 | struct dma_async_tx_descriptor txd; | ||
640 | struct tsi721_dma_desc *hw_desc; | ||
641 | u16 destid; | ||
642 | /* low 64-bits of 66-bit RIO address */ | ||
643 | u64 rio_addr; | ||
644 | /* upper 2-bits of 66-bit RIO address */ | ||
645 | u8 rio_addr_u; | ||
646 | bool interrupt; | ||
647 | struct list_head desc_node; | ||
648 | struct list_head tx_list; | ||
649 | }; | ||
650 | |||
629 | struct tsi721_bdma_chan { | 651 | struct tsi721_bdma_chan { |
652 | int id; | ||
653 | void __iomem *regs; | ||
654 | int bd_num; /* number of buffer descriptors */ | ||
655 | void *bd_base; /* start of DMA descriptors */ | ||
656 | dma_addr_t bd_phys; | ||
657 | void *sts_base; /* start of DMA BD status FIFO */ | ||
658 | dma_addr_t sts_phys; | ||
659 | int sts_size; | ||
660 | u32 sts_rdptr; | ||
661 | u32 wr_count; | ||
662 | u32 wr_count_next; | ||
663 | |||
664 | struct dma_chan dchan; | ||
665 | struct tsi721_tx_desc *tx_desc; | ||
666 | spinlock_t lock; | ||
667 | struct list_head active_list; | ||
668 | struct list_head queue; | ||
669 | struct list_head free_list; | ||
670 | dma_cookie_t completed_cookie; | ||
671 | struct tasklet_struct tasklet; | ||
672 | }; | ||
673 | |||
674 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | ||
675 | |||
676 | struct tsi721_bdma_maint { | ||
677 | int ch_id; /* BDMA channel number */ | ||
630 | int bd_num; /* number of buffer descriptors */ | 678 | int bd_num; /* number of buffer descriptors */ |
631 | void *bd_base; /* start of DMA descriptors */ | 679 | void *bd_base; /* start of DMA descriptors */ |
632 | dma_addr_t bd_phys; | 680 | dma_addr_t bd_phys; |
@@ -721,6 +769,24 @@ enum tsi721_msix_vect { | |||
721 | TSI721_VECT_IMB1_INT, | 769 | TSI721_VECT_IMB1_INT, |
722 | TSI721_VECT_IMB2_INT, | 770 | TSI721_VECT_IMB2_INT, |
723 | TSI721_VECT_IMB3_INT, | 771 | TSI721_VECT_IMB3_INT, |
772 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
773 | TSI721_VECT_DMA0_DONE, | ||
774 | TSI721_VECT_DMA1_DONE, | ||
775 | TSI721_VECT_DMA2_DONE, | ||
776 | TSI721_VECT_DMA3_DONE, | ||
777 | TSI721_VECT_DMA4_DONE, | ||
778 | TSI721_VECT_DMA5_DONE, | ||
779 | TSI721_VECT_DMA6_DONE, | ||
780 | TSI721_VECT_DMA7_DONE, | ||
781 | TSI721_VECT_DMA0_INT, | ||
782 | TSI721_VECT_DMA1_INT, | ||
783 | TSI721_VECT_DMA2_INT, | ||
784 | TSI721_VECT_DMA3_INT, | ||
785 | TSI721_VECT_DMA4_INT, | ||
786 | TSI721_VECT_DMA5_INT, | ||
787 | TSI721_VECT_DMA6_INT, | ||
788 | TSI721_VECT_DMA7_INT, | ||
789 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | ||
724 | TSI721_VECT_MAX | 790 | TSI721_VECT_MAX |
725 | }; | 791 | }; |
726 | 792 | ||
@@ -754,7 +820,11 @@ struct tsi721_device { | |||
754 | u32 pw_discard_count; | 820 | u32 pw_discard_count; |
755 | 821 | ||
756 | /* BDMA Engine */ | 822 | /* BDMA Engine */ |
823 | struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */ | ||
824 | |||
825 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
757 | struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM]; | 826 | struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM]; |
827 | #endif | ||
758 | 828 | ||
759 | /* Inbound Messaging */ | 829 | /* Inbound Messaging */ |
760 | int imsg_init[TSI721_IMSG_CHNUM]; | 830 | int imsg_init[TSI721_IMSG_CHNUM]; |
@@ -765,4 +835,9 @@ struct tsi721_device { | |||
765 | struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM]; | 835 | struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM]; |
766 | }; | 836 | }; |
767 | 837 | ||
838 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
839 | extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan); | ||
840 | extern int __devinit tsi721_register_dma(struct tsi721_device *priv); | ||
841 | #endif | ||
842 | |||
768 | #endif | 843 | #endif |
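tsi721_dma.c, added below, supplies the tsi721_bdma_handler() and tsi721_register_dma() entry points declared at the end of the header above, plus the DMA engine callbacks (channel resource allocation and release, prep_rio_sg, tx_submit, tx_status, issue_pending). One detail worth noting before reading it: cookie bookkeeping is done by hand in tsi721_tx_submit(), assigning increasing positive cookies and wrapping back to 1 on signed overflow. A condensed, self-contained restatement of that sequence (names here are illustrative):

```c
#include <stdio.h>

typedef int dma_cookie_t;	/* stand-in for the kernel typedef */

/* Assign the next positive cookie; negative values stay reserved for error
 * codes, so a signed wrap restarts the sequence at 1 -- the same logic used
 * in tsi721_tx_submit() below. */
static dma_cookie_t assign_cookie(dma_cookie_t *chan_cookie)
{
	dma_cookie_t cookie = *chan_cookie;

	if (++cookie < 0)
		cookie = 1;
	*chan_cookie = cookie;
	return cookie;
}

int main(void)
{
	dma_cookie_t chan_cookie = 1;	/* channels start at 1, see alloc_chan_resources */
	int i;

	for (i = 0; i < 3; i++)
		printf("submitted cookie %d\n", assign_cookie(&chan_cookie));
	return 0;
}
```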
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644
index 000000000000..92e06a5c62ec
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -0,0 +1,823 @@ | |||
1 | /* | ||
2 | * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge | ||
3 | * | ||
4 | * Copyright 2011 Integrated Device Technology, Inc. | ||
5 | * Alexandre Bounine <alexandre.bounine@idt.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
19 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/io.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/ioport.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/pci.h> | ||
29 | #include <linux/rio.h> | ||
30 | #include <linux/rio_drv.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | #include <linux/interrupt.h> | ||
33 | #include <linux/kfifo.h> | ||
34 | #include <linux/delay.h> | ||
35 | |||
36 | #include "tsi721.h" | ||
37 | |||
38 | static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) | ||
39 | { | ||
40 | return container_of(chan, struct tsi721_bdma_chan, dchan); | ||
41 | } | ||
42 | |||
43 | static inline struct tsi721_device *to_tsi721(struct dma_device *ddev) | ||
44 | { | ||
45 | return container_of(ddev, struct rio_mport, dma)->priv; | ||
46 | } | ||
47 | |||
48 | static inline | ||
49 | struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd) | ||
50 | { | ||
51 | return container_of(txd, struct tsi721_tx_desc, txd); | ||
52 | } | ||
53 | |||
54 | static inline | ||
55 | struct tsi721_tx_desc *tsi721_dma_first_active( | ||
56 | struct tsi721_bdma_chan *bdma_chan) | ||
57 | { | ||
58 | return list_first_entry(&bdma_chan->active_list, | ||
59 | struct tsi721_tx_desc, desc_node); | ||
60 | } | ||
61 | |||
62 | static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) | ||
63 | { | ||
64 | struct tsi721_dma_desc *bd_ptr; | ||
65 | struct device *dev = bdma_chan->dchan.device->dev; | ||
66 | u64 *sts_ptr; | ||
67 | dma_addr_t bd_phys; | ||
68 | dma_addr_t sts_phys; | ||
69 | int sts_size; | ||
70 | int bd_num = bdma_chan->bd_num; | ||
71 | |||
72 | dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id); | ||
73 | |||
74 | /* Allocate space for DMA descriptors */ | ||
75 | bd_ptr = dma_zalloc_coherent(dev, | ||
76 | bd_num * sizeof(struct tsi721_dma_desc), | ||
77 | &bd_phys, GFP_KERNEL); | ||
78 | if (!bd_ptr) | ||
79 | return -ENOMEM; | ||
80 | |||
81 | bdma_chan->bd_phys = bd_phys; | ||
82 | bdma_chan->bd_base = bd_ptr; | ||
83 | |||
84 | dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n", | ||
85 | bd_ptr, (unsigned long long)bd_phys); | ||
86 | |||
87 | /* Allocate space for descriptor status FIFO */ | ||
88 | sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? | ||
89 | bd_num : TSI721_DMA_MINSTSSZ; | ||
90 | sts_size = roundup_pow_of_two(sts_size); | ||
91 | sts_ptr = dma_zalloc_coherent(dev, | ||
92 | sts_size * sizeof(struct tsi721_dma_sts), | ||
93 | &sts_phys, GFP_KERNEL); | ||
94 | if (!sts_ptr) { | ||
95 | /* Free space allocated for DMA descriptors */ | ||
96 | dma_free_coherent(dev, | ||
97 | bd_num * sizeof(struct tsi721_dma_desc), | ||
98 | bd_ptr, bd_phys); | ||
99 | bdma_chan->bd_base = NULL; | ||
100 | return -ENOMEM; | ||
101 | } | ||
102 | |||
103 | bdma_chan->sts_phys = sts_phys; | ||
104 | bdma_chan->sts_base = sts_ptr; | ||
105 | bdma_chan->sts_size = sts_size; | ||
106 | |||
107 | dev_dbg(dev, | ||
108 | "desc status FIFO @ %p (phys = %llx) size=0x%x\n", | ||
109 | sts_ptr, (unsigned long long)sts_phys, sts_size); | ||
110 | |||
111 | /* Initialize DMA descriptors ring */ | ||
112 | bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29); | ||
113 | bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys & | ||
114 | TSI721_DMAC_DPTRL_MASK); | ||
115 | bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); | ||
116 | |||
117 | /* Setup DMA descriptor pointers */ | ||
118 | iowrite32(((u64)bd_phys >> 32), | ||
119 | bdma_chan->regs + TSI721_DMAC_DPTRH); | ||
120 | iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), | ||
121 | bdma_chan->regs + TSI721_DMAC_DPTRL); | ||
122 | |||
123 | /* Setup descriptor status FIFO */ | ||
124 | iowrite32(((u64)sts_phys >> 32), | ||
125 | bdma_chan->regs + TSI721_DMAC_DSBH); | ||
126 | iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), | ||
127 | bdma_chan->regs + TSI721_DMAC_DSBL); | ||
128 | iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), | ||
129 | bdma_chan->regs + TSI721_DMAC_DSSZ); | ||
130 | |||
131 | /* Clear interrupt bits */ | ||
132 | iowrite32(TSI721_DMAC_INT_ALL, | ||
133 | bdma_chan->regs + TSI721_DMAC_INT); | ||
134 | |||
135 | ioread32(bdma_chan->regs + TSI721_DMAC_INT); | ||
136 | |||
137 | /* Toggle DMA channel initialization */ | ||
138 | iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); | ||
139 | ioread32(bdma_chan->regs + TSI721_DMAC_CTL); | ||
140 | bdma_chan->wr_count = bdma_chan->wr_count_next = 0; | ||
141 | bdma_chan->sts_rdptr = 0; | ||
142 | udelay(10); | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan) | ||
148 | { | ||
149 | u32 ch_stat; | ||
150 | |||
151 | if (bdma_chan->bd_base == NULL) | ||
152 | return 0; | ||
153 | |||
154 | /* Check if DMA channel still running */ | ||
155 | ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS); | ||
156 | if (ch_stat & TSI721_DMAC_STS_RUN) | ||
157 | return -EFAULT; | ||
158 | |||
159 | /* Put DMA channel into init state */ | ||
160 | iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); | ||
161 | |||
162 | /* Free space allocated for DMA descriptors */ | ||
163 | dma_free_coherent(bdma_chan->dchan.device->dev, | ||
164 | bdma_chan->bd_num * sizeof(struct tsi721_dma_desc), | ||
165 | bdma_chan->bd_base, bdma_chan->bd_phys); | ||
166 | bdma_chan->bd_base = NULL; | ||
167 | |||
168 | /* Free space allocated for status FIFO */ | ||
169 | dma_free_coherent(bdma_chan->dchan.device->dev, | ||
170 | bdma_chan->sts_size * sizeof(struct tsi721_dma_sts), | ||
171 | bdma_chan->sts_base, bdma_chan->sts_phys); | ||
172 | bdma_chan->sts_base = NULL; | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static void | ||
177 | tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable) | ||
178 | { | ||
179 | if (enable) { | ||
180 | /* Clear pending BDMA channel interrupts */ | ||
181 | iowrite32(TSI721_DMAC_INT_ALL, | ||
182 | bdma_chan->regs + TSI721_DMAC_INT); | ||
183 | ioread32(bdma_chan->regs + TSI721_DMAC_INT); | ||
184 | /* Enable BDMA channel interrupts */ | ||
185 | iowrite32(TSI721_DMAC_INT_ALL, | ||
186 | bdma_chan->regs + TSI721_DMAC_INTE); | ||
187 | } else { | ||
188 | /* Disable BDMA channel interrupts */ | ||
189 | iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); | ||
190 | /* Clear pending BDMA channel interrupts */ | ||
191 | iowrite32(TSI721_DMAC_INT_ALL, | ||
192 | bdma_chan->regs + TSI721_DMAC_INT); | ||
193 | } | ||
194 | |||
195 | } | ||
196 | |||
197 | static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan) | ||
198 | { | ||
199 | u32 sts; | ||
200 | |||
201 | sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); | ||
202 | return ((sts & TSI721_DMAC_STS_RUN) == 0); | ||
203 | } | ||
204 | |||
205 | void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan) | ||
206 | { | ||
207 | /* Disable BDMA channel interrupts */ | ||
208 | iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); | ||
209 | |||
210 | tasklet_schedule(&bdma_chan->tasklet); | ||
211 | } | ||
212 | |||
213 | #ifdef CONFIG_PCI_MSI | ||
214 | /** | ||
214 | * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels | ||
216 | * @irq: Linux interrupt number | ||
217 | * @ptr: Pointer to interrupt-specific data (BDMA channel structure) | ||
218 | * | ||
219 | * Handles BDMA channel interrupts signaled using MSI-X. | ||
220 | */ | ||
221 | static irqreturn_t tsi721_bdma_msix(int irq, void *ptr) | ||
222 | { | ||
223 | struct tsi721_bdma_chan *bdma_chan = ptr; | ||
224 | |||
225 | tsi721_bdma_handler(bdma_chan); | ||
226 | return IRQ_HANDLED; | ||
227 | } | ||
228 | #endif /* CONFIG_PCI_MSI */ | ||
229 | |||
230 | /* Must be called with the spinlock held */ | ||
231 | static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan) | ||
232 | { | ||
233 | if (!tsi721_dma_is_idle(bdma_chan)) { | ||
234 | dev_err(bdma_chan->dchan.device->dev, | ||
235 | "BUG: Attempt to start non-idle channel\n"); | ||
236 | return; | ||
237 | } | ||
238 | |||
239 | if (bdma_chan->wr_count == bdma_chan->wr_count_next) { | ||
240 | dev_err(bdma_chan->dchan.device->dev, | ||
241 | "BUG: Attempt to start DMA with no BDs ready\n"); | ||
242 | return; | ||
243 | } | ||
244 | |||
245 | dev_dbg(bdma_chan->dchan.device->dev, | ||
246 | "tx_chan: %p, chan: %d, regs: %p\n", | ||
247 | bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs); | ||
248 | |||
249 | iowrite32(bdma_chan->wr_count_next, | ||
250 | bdma_chan->regs + TSI721_DMAC_DWRCNT); | ||
251 | ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT); | ||
252 | |||
253 | bdma_chan->wr_count = bdma_chan->wr_count_next; | ||
254 | } | ||
255 | |||
256 | static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan, | ||
257 | struct tsi721_tx_desc *desc) | ||
258 | { | ||
259 | dev_dbg(bdma_chan->dchan.device->dev, | ||
260 | "Put desc: %p into free list\n", desc); | ||
261 | |||
262 | if (desc) { | ||
263 | spin_lock_bh(&bdma_chan->lock); | ||
264 | list_splice_init(&desc->tx_list, &bdma_chan->free_list); | ||
265 | list_add(&desc->desc_node, &bdma_chan->free_list); | ||
266 | bdma_chan->wr_count_next = bdma_chan->wr_count; | ||
267 | spin_unlock_bh(&bdma_chan->lock); | ||
268 | } | ||
269 | } | ||
270 | |||
271 | static | ||
272 | struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan) | ||
273 | { | ||
274 | struct tsi721_tx_desc *tx_desc, *_tx_desc; | ||
275 | struct tsi721_tx_desc *ret = NULL; | ||
276 | int i; | ||
277 | |||
278 | spin_lock_bh(&bdma_chan->lock); | ||
279 | list_for_each_entry_safe(tx_desc, _tx_desc, | ||
280 | &bdma_chan->free_list, desc_node) { | ||
281 | if (async_tx_test_ack(&tx_desc->txd)) { | ||
282 | list_del(&tx_desc->desc_node); | ||
283 | ret = tx_desc; | ||
284 | break; | ||
285 | } | ||
286 | dev_dbg(bdma_chan->dchan.device->dev, | ||
287 | "desc %p not ACKed\n", tx_desc); | ||
288 | } | ||
289 | |||
290 | i = bdma_chan->wr_count_next % bdma_chan->bd_num; | ||
291 | if (i == bdma_chan->bd_num - 1) { | ||
292 | i = 0; | ||
293 | bdma_chan->wr_count_next++; /* skip link descriptor */ | ||
294 | } | ||
295 | |||
296 | bdma_chan->wr_count_next++; | ||
297 | tx_desc->txd.phys = bdma_chan->bd_phys + | ||
298 | i * sizeof(struct tsi721_dma_desc); | ||
299 | tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i]; | ||
300 | |||
301 | spin_unlock_bh(&bdma_chan->lock); | ||
302 | |||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | static int | ||
307 | tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan, | ||
308 | struct tsi721_tx_desc *desc, struct scatterlist *sg, | ||
309 | enum dma_rtype rtype, u32 sys_size) | ||
310 | { | ||
311 | struct tsi721_dma_desc *bd_ptr = desc->hw_desc; | ||
312 | u64 rio_addr; | ||
313 | |||
314 | if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) { | ||
315 | dev_err(bdma_chan->dchan.device->dev, | ||
316 | "SG element is too large\n"); | ||
317 | return -EINVAL; | ||
318 | } | ||
319 | |||
320 | dev_dbg(bdma_chan->dchan.device->dev, | ||
321 | "desc: 0x%llx, addr: 0x%llx len: 0x%x\n", | ||
322 | (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg), | ||
323 | sg_dma_len(sg)); | ||
324 | |||
325 | dev_dbg(bdma_chan->dchan.device->dev, | ||
326 | "bd_ptr = %p did=%d raddr=0x%llx\n", | ||
327 | bd_ptr, desc->destid, desc->rio_addr); | ||
328 | |||
329 | /* Initialize DMA descriptor */ | ||
330 | bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) | | ||
331 | (rtype << 19) | desc->destid); | ||
332 | if (desc->interrupt) | ||
333 | bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF); | ||
334 | bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) | | ||
335 | (sys_size << 26) | sg_dma_len(sg)); | ||
336 | rio_addr = (desc->rio_addr >> 2) | | ||
337 | ((u64)(desc->rio_addr_u & 0x3) << 62); | ||
338 | bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff); | ||
339 | bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32); | ||
340 | bd_ptr->t1.bufptr_lo = cpu_to_le32( | ||
341 | (u64)sg_dma_address(sg) & 0xffffffff); | ||
342 | bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32); | ||
343 | bd_ptr->t1.s_dist = 0; | ||
344 | bd_ptr->t1.s_size = 0; | ||
345 | |||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan, | ||
350 | struct tsi721_tx_desc *desc) | ||
351 | { | ||
352 | struct dma_async_tx_descriptor *txd = &desc->txd; | ||
353 | dma_async_tx_callback callback = txd->callback; | ||
354 | void *param = txd->callback_param; | ||
355 | |||
356 | list_splice_init(&desc->tx_list, &bdma_chan->free_list); | ||
357 | list_move(&desc->desc_node, &bdma_chan->free_list); | ||
358 | bdma_chan->completed_cookie = txd->cookie; | ||
359 | |||
360 | if (callback) | ||
361 | callback(param); | ||
362 | } | ||
363 | |||
364 | static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan) | ||
365 | { | ||
366 | struct tsi721_tx_desc *desc, *_d; | ||
367 | LIST_HEAD(list); | ||
368 | |||
369 | BUG_ON(!tsi721_dma_is_idle(bdma_chan)); | ||
370 | |||
371 | if (!list_empty(&bdma_chan->queue)) | ||
372 | tsi721_start_dma(bdma_chan); | ||
373 | |||
374 | list_splice_init(&bdma_chan->active_list, &list); | ||
375 | list_splice_init(&bdma_chan->queue, &bdma_chan->active_list); | ||
376 | |||
377 | list_for_each_entry_safe(desc, _d, &list, desc_node) | ||
378 | tsi721_dma_chain_complete(bdma_chan, desc); | ||
379 | } | ||
380 | |||
381 | static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan) | ||
382 | { | ||
383 | u32 srd_ptr; | ||
384 | u64 *sts_ptr; | ||
385 | int i, j; | ||
386 | |||
387 | /* Check and clear descriptor status FIFO entries */ | ||
388 | srd_ptr = bdma_chan->sts_rdptr; | ||
389 | sts_ptr = bdma_chan->sts_base; | ||
390 | j = srd_ptr * 8; | ||
391 | while (sts_ptr[j]) { | ||
392 | for (i = 0; i < 8 && sts_ptr[j]; i++, j++) | ||
393 | sts_ptr[j] = 0; | ||
394 | |||
395 | ++srd_ptr; | ||
396 | srd_ptr %= bdma_chan->sts_size; | ||
397 | j = srd_ptr * 8; | ||
398 | } | ||
399 | |||
400 | iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP); | ||
401 | bdma_chan->sts_rdptr = srd_ptr; | ||
402 | } | ||
403 | |||
404 | static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan) | ||
405 | { | ||
406 | if (list_empty(&bdma_chan->active_list) || | ||
407 | list_is_singular(&bdma_chan->active_list)) { | ||
408 | dev_dbg(bdma_chan->dchan.device->dev, | ||
409 | "%s: Active_list empty\n", __func__); | ||
410 | tsi721_dma_complete_all(bdma_chan); | ||
411 | } else { | ||
412 | dev_dbg(bdma_chan->dchan.device->dev, | ||
413 | "%s: Active_list NOT empty\n", __func__); | ||
414 | tsi721_dma_chain_complete(bdma_chan, | ||
415 | tsi721_dma_first_active(bdma_chan)); | ||
416 | tsi721_start_dma(bdma_chan); | ||
417 | } | ||
418 | } | ||
419 | |||
420 | static void tsi721_dma_tasklet(unsigned long data) | ||
421 | { | ||
422 | struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data; | ||
423 | u32 dmac_int, dmac_sts; | ||
424 | |||
425 | dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); | ||
426 | dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n", | ||
427 | __func__, bdma_chan->id, dmac_int); | ||
428 | /* Clear channel interrupts */ | ||
429 | iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT); | ||
430 | |||
431 | if (dmac_int & TSI721_DMAC_INT_ERR) { | ||
432 | dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); | ||
433 | dev_err(bdma_chan->dchan.device->dev, | ||
434 | "%s: DMA ERROR - DMAC%d_STS = 0x%x\n", | ||
435 | __func__, bdma_chan->id, dmac_sts); | ||
436 | } | ||
437 | |||
438 | if (dmac_int & TSI721_DMAC_INT_STFULL) { | ||
439 | dev_err(bdma_chan->dchan.device->dev, | ||
440 | "%s: DMAC%d descriptor status FIFO is full\n", | ||
441 | __func__, bdma_chan->id); | ||
442 | } | ||
443 | |||
444 | if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) { | ||
445 | tsi721_clr_stat(bdma_chan); | ||
446 | spin_lock(&bdma_chan->lock); | ||
447 | tsi721_advance_work(bdma_chan); | ||
448 | spin_unlock(&bdma_chan->lock); | ||
449 | } | ||
450 | |||
451 | /* Re-Enable BDMA channel interrupts */ | ||
452 | iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE); | ||
453 | } | ||
454 | |||
455 | static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) | ||
456 | { | ||
457 | struct tsi721_tx_desc *desc = to_tsi721_desc(txd); | ||
458 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan); | ||
459 | dma_cookie_t cookie; | ||
460 | |||
461 | spin_lock_bh(&bdma_chan->lock); | ||
462 | |||
463 | cookie = txd->chan->cookie; | ||
464 | if (++cookie < 0) | ||
465 | cookie = 1; | ||
466 | txd->chan->cookie = cookie; | ||
467 | txd->cookie = cookie; | ||
468 | |||
469 | if (list_empty(&bdma_chan->active_list)) { | ||
470 | list_add_tail(&desc->desc_node, &bdma_chan->active_list); | ||
471 | tsi721_start_dma(bdma_chan); | ||
472 | } else { | ||
473 | list_add_tail(&desc->desc_node, &bdma_chan->queue); | ||
474 | } | ||
475 | |||
476 | spin_unlock_bh(&bdma_chan->lock); | ||
477 | return cookie; | ||
478 | } | ||
479 | |||
480 | static int tsi721_alloc_chan_resources(struct dma_chan *dchan) | ||
481 | { | ||
482 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
483 | #ifdef CONFIG_PCI_MSI | ||
484 | struct tsi721_device *priv = to_tsi721(dchan->device); | ||
485 | #endif | ||
486 | struct tsi721_tx_desc *desc = NULL; | ||
487 | LIST_HEAD(tmp_list); | ||
488 | int i; | ||
489 | int rc; | ||
490 | |||
491 | if (bdma_chan->bd_base) | ||
492 | return bdma_chan->bd_num - 1; | ||
493 | |||
494 | /* Initialize BDMA channel */ | ||
495 | if (tsi721_bdma_ch_init(bdma_chan)) { | ||
496 | dev_err(dchan->device->dev, "Unable to initialize data DMA" | ||
497 | " channel %d, aborting\n", bdma_chan->id); | ||
498 | return -ENOMEM; | ||
499 | } | ||
500 | |||
501 | /* Allocate matching number of logical descriptors */ | ||
502 | desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc), | ||
503 | GFP_KERNEL); | ||
504 | if (!desc) { | ||
505 | dev_err(dchan->device->dev, | ||
506 | "Failed to allocate logical descriptors\n"); | ||
507 | rc = -ENOMEM; | ||
508 | goto err_out; | ||
509 | } | ||
510 | |||
511 | bdma_chan->tx_desc = desc; | ||
512 | |||
513 | for (i = 0; i < bdma_chan->bd_num - 1; i++) { | ||
514 | dma_async_tx_descriptor_init(&desc[i].txd, dchan); | ||
515 | desc[i].txd.tx_submit = tsi721_tx_submit; | ||
516 | desc[i].txd.flags = DMA_CTRL_ACK; | ||
517 | INIT_LIST_HEAD(&desc[i].tx_list); | ||
518 | list_add_tail(&desc[i].desc_node, &tmp_list); | ||
519 | } | ||
520 | |||
521 | spin_lock_bh(&bdma_chan->lock); | ||
522 | list_splice(&tmp_list, &bdma_chan->free_list); | ||
523 | bdma_chan->completed_cookie = dchan->cookie = 1; | ||
524 | spin_unlock_bh(&bdma_chan->lock); | ||
525 | |||
526 | #ifdef CONFIG_PCI_MSI | ||
527 | if (priv->flags & TSI721_USING_MSIX) { | ||
528 | /* Request interrupt service if we are in MSI-X mode */ | ||
529 | rc = request_irq( | ||
530 | priv->msix[TSI721_VECT_DMA0_DONE + | ||
531 | bdma_chan->id].vector, | ||
532 | tsi721_bdma_msix, 0, | ||
533 | priv->msix[TSI721_VECT_DMA0_DONE + | ||
534 | bdma_chan->id].irq_name, | ||
535 | (void *)bdma_chan); | ||
536 | |||
537 | if (rc) { | ||
538 | dev_dbg(dchan->device->dev, | ||
539 | "Unable to allocate MSI-X interrupt for " | ||
540 | "BDMA%d-DONE\n", bdma_chan->id); | ||
541 | goto err_out; | ||
542 | } | ||
543 | |||
544 | rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT + | ||
545 | bdma_chan->id].vector, | ||
546 | tsi721_bdma_msix, 0, | ||
547 | priv->msix[TSI721_VECT_DMA0_INT + | ||
548 | bdma_chan->id].irq_name, | ||
549 | (void *)bdma_chan); | ||
550 | |||
551 | if (rc) { | ||
552 | dev_dbg(dchan->device->dev, | ||
553 | "Unable to allocate MSI-X interrupt for " | ||
554 | "BDMA%d-INT\n", bdma_chan->id); | ||
555 | free_irq( | ||
556 | priv->msix[TSI721_VECT_DMA0_DONE + | ||
557 | bdma_chan->id].vector, | ||
558 | (void *)bdma_chan); | ||
559 | rc = -EIO; | ||
560 | goto err_out; | ||
561 | } | ||
562 | } | ||
563 | #endif /* CONFIG_PCI_MSI */ | ||
564 | |||
565 | tasklet_enable(&bdma_chan->tasklet); | ||
566 | tsi721_bdma_interrupt_enable(bdma_chan, 1); | ||
567 | |||
568 | return bdma_chan->bd_num - 1; | ||
569 | |||
570 | err_out: | ||
571 | kfree(desc); | ||
572 | tsi721_bdma_ch_free(bdma_chan); | ||
573 | return rc; | ||
574 | } | ||
575 | |||
576 | static void tsi721_free_chan_resources(struct dma_chan *dchan) | ||
577 | { | ||
578 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
579 | #ifdef CONFIG_PCI_MSI | ||
580 | struct tsi721_device *priv = to_tsi721(dchan->device); | ||
581 | #endif | ||
582 | LIST_HEAD(list); | ||
583 | |||
584 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); | ||
585 | |||
586 | if (bdma_chan->bd_base == NULL) | ||
587 | return; | ||
588 | |||
589 | BUG_ON(!list_empty(&bdma_chan->active_list)); | ||
590 | BUG_ON(!list_empty(&bdma_chan->queue)); | ||
591 | |||
592 | tasklet_disable(&bdma_chan->tasklet); | ||
593 | |||
594 | spin_lock_bh(&bdma_chan->lock); | ||
595 | list_splice_init(&bdma_chan->free_list, &list); | ||
596 | spin_unlock_bh(&bdma_chan->lock); | ||
597 | |||
598 | tsi721_bdma_interrupt_enable(bdma_chan, 0); | ||
599 | |||
600 | #ifdef CONFIG_PCI_MSI | ||
601 | if (priv->flags & TSI721_USING_MSIX) { | ||
602 | free_irq(priv->msix[TSI721_VECT_DMA0_DONE + | ||
603 | bdma_chan->id].vector, (void *)bdma_chan); | ||
604 | free_irq(priv->msix[TSI721_VECT_DMA0_INT + | ||
605 | bdma_chan->id].vector, (void *)bdma_chan); | ||
606 | } | ||
607 | #endif /* CONFIG_PCI_MSI */ | ||
608 | |||
609 | tsi721_bdma_ch_free(bdma_chan); | ||
610 | kfree(bdma_chan->tx_desc); | ||
611 | } | ||
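The client-side counterpart is a plain channel release; once the last reference is dropped, the core ends up in tsi721_free_chan_resources(). Illustrative call, with dchan being the channel the client obtained earlier:

	dma_release_channel(dchan);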
612 | |||
613 | static | ||
614 | enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, | ||
615 | struct dma_tx_state *txstate) | ||
616 | { | ||
617 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
618 | dma_cookie_t last_used; | ||
619 | dma_cookie_t last_completed; | ||
620 | int ret; | ||
621 | |||
622 | spin_lock_bh(&bdma_chan->lock); | ||
623 | last_completed = bdma_chan->completed_cookie; | ||
624 | last_used = dchan->cookie; | ||
625 | spin_unlock_bh(&bdma_chan->lock); | ||
626 | |||
627 | ret = dma_async_is_complete(cookie, last_completed, last_used); | ||
628 | |||
629 | dma_set_tx_state(txstate, last_completed, last_used, 0); | ||
630 | |||
631 | dev_dbg(dchan->device->dev, | ||
632 | "%s: exit, ret: %d, last_completed: %d, last_used: %d\n", | ||
633 | __func__, ret, last_completed, last_used); | ||
634 | |||
635 | return ret; | ||
636 | } | ||
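Clients normally reach tsi721_tx_status() through the dma_async_is_tx_complete() helper rather than calling the device_tx_status hook themselves. A minimal polling sketch, assuming cookie was returned by an earlier submit on dchan (DMA_SUCCESS is the completion status value of this kernel generation):

	while (dma_async_is_tx_complete(dchan, cookie, NULL, NULL) != DMA_SUCCESS)
		cpu_relax();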
637 | |||
638 | static void tsi721_issue_pending(struct dma_chan *dchan) | ||
639 | { | ||
640 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
641 | |||
642 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); | ||
643 | |||
644 | if (tsi721_dma_is_idle(bdma_chan)) { | ||
645 | spin_lock_bh(&bdma_chan->lock); | ||
646 | tsi721_advance_work(bdma_chan); | ||
647 | spin_unlock_bh(&bdma_chan->lock); | ||
648 | } else | ||
649 | dev_dbg(dchan->device->dev, | ||
650 | "%s: DMA channel still busy\n", __func__); | ||
651 | } | ||
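From the client's point of view this is the second half of the usual submit sequence: queue the descriptor obtained from the prep routine below, then kick the channel. A sketch, with txd assumed to come from a successful prep call:

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(dchan);	/* ends up in tsi721_issue_pending() */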
652 | |||
653 | static | ||
654 | struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, | ||
655 | struct scatterlist *sgl, unsigned int sg_len, | ||
656 | enum dma_transfer_direction dir, unsigned long flags, | ||
657 | void *tinfo) | ||
658 | { | ||
659 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
660 | struct tsi721_tx_desc *desc = NULL; | ||
661 | struct tsi721_tx_desc *first = NULL; | ||
662 | struct scatterlist *sg; | ||
663 | struct rio_dma_ext *rext = tinfo; | ||
664 | u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */ | ||
665 | unsigned int i; | ||
666 | u32 sys_size = dma_to_mport(dchan->device)->sys_size; | ||
667 | enum dma_rtype rtype; | ||
668 | |||
669 | if (!sgl || !sg_len) { | ||
670 | dev_err(dchan->device->dev, "%s: No SG list\n", __func__); | ||
671 | return NULL; | ||
672 | } | ||
673 | |||
674 | if (dir == DMA_DEV_TO_MEM) | ||
675 | rtype = NREAD; | ||
676 | else if (dir == DMA_MEM_TO_DEV) { | ||
677 | switch (rext->wr_type) { | ||
678 | case RDW_ALL_NWRITE: | ||
679 | rtype = ALL_NWRITE; | ||
680 | break; | ||
681 | case RDW_ALL_NWRITE_R: | ||
682 | rtype = ALL_NWRITE_R; | ||
683 | break; | ||
684 | case RDW_LAST_NWRITE_R: | ||
685 | default: | ||
686 | rtype = LAST_NWRITE_R; | ||
687 | break; | ||
688 | } | ||
689 | } else { | ||
690 | dev_err(dchan->device->dev, | ||
691 | "%s: Unsupported DMA direction option\n", __func__); | ||
692 | return NULL; | ||
693 | } | ||
694 | |||
695 | for_each_sg(sgl, sg, sg_len, i) { | ||
696 | int err; | ||
697 | |||
698 | dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i); | ||
699 | desc = tsi721_desc_get(bdma_chan); | ||
700 | if (!desc) { | ||
701 | dev_err(dchan->device->dev, | ||
702 | "Not enough descriptors available\n"); | ||
703 | goto err_desc_get; | ||
704 | } | ||
705 | |||
706 | if (sg_is_last(sg)) | ||
707 | desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0; | ||
708 | else | ||
709 | desc->interrupt = false; | ||
710 | |||
711 | desc->destid = rext->destid; | ||
712 | desc->rio_addr = rio_addr; | ||
713 | desc->rio_addr_u = 0; | ||
714 | |||
715 | err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size); | ||
716 | if (err) { | ||
717 | dev_err(dchan->device->dev, | ||
718 | "Failed to build desc: %d\n", err); | ||
719 | goto err_desc_get; | ||
720 | } | ||
721 | |||
722 | rio_addr += sg_dma_len(sg); | ||
723 | |||
724 | if (!first) | ||
725 | first = desc; | ||
726 | else | ||
727 | list_add_tail(&desc->desc_node, &first->tx_list); | ||
728 | } | ||
729 | |||
730 | first->txd.cookie = -EBUSY; | ||
731 | desc->txd.flags = flags; | ||
732 | |||
733 | return &first->txd; | ||
734 | |||
735 | err_desc_get: | ||
736 | tsi721_desc_put(bdma_chan, first); | ||
737 | return NULL; | ||
738 | } | ||
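The tinfo context pointer is what makes this a RapidIO-specific slave transfer: the client fills a struct rio_dma_ext with the destination ID, the RapidIO address and the write type, and passes it through the raw device_prep_slave_sg hook so the extra argument reaches this routine. A hedged client-side sketch; destid, rio_base, sgl and sg_len are illustrative, and only the rio_dma_ext fields dereferenced above are assumed:

	struct rio_dma_ext rio_ext;
	struct dma_async_tx_descriptor *txd;

	rio_ext.destid = destid;		/* target RapidIO device ID */
	rio_ext.rio_addr = rio_base;		/* address within the target */
	rio_ext.wr_type = RDW_LAST_NWRITE_R;	/* NWRITE_R for the last packet only */

	txd = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
						   DMA_MEM_TO_DEV,
						   DMA_PREP_INTERRUPT, &rio_ext);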
739 | |||
740 | static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | ||
741 | unsigned long arg) | ||
742 | { | ||
743 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
744 | struct tsi721_tx_desc *desc, *_d; | ||
745 | LIST_HEAD(list); | ||
746 | |||
747 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); | ||
748 | |||
749 | if (cmd != DMA_TERMINATE_ALL) | ||
750 | return -ENXIO; | ||
751 | |||
752 | spin_lock_bh(&bdma_chan->lock); | ||
753 | |||
754 | /* make sure to stop the transfer */ | ||
755 | iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL); | ||
756 | |||
757 | list_splice_init(&bdma_chan->active_list, &list); | ||
758 | list_splice_init(&bdma_chan->queue, &list); | ||
759 | |||
760 | list_for_each_entry_safe(desc, _d, &list, desc_node) | ||
761 | tsi721_dma_chain_complete(bdma_chan, desc); | ||
762 | |||
763 | spin_unlock_bh(&bdma_chan->lock); | ||
764 | |||
765 | return 0; | ||
766 | } | ||
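Clients are not expected to call the device_control hook directly; the dmaengine wrapper issues the DMA_TERMINATE_ALL command on their behalf:

	dmaengine_terminate_all(dchan);	/* routed to tsi721_device_control() */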
767 | |||
768 | int __devinit tsi721_register_dma(struct tsi721_device *priv) | ||
769 | { | ||
770 | int i; | ||
771 | int nr_channels = TSI721_DMA_MAXCH; | ||
772 | int err; | ||
773 | struct rio_mport *mport = priv->mport; | ||
774 | |||
775 | mport->dma.dev = &priv->pdev->dev; | ||
776 | mport->dma.chancnt = nr_channels; | ||
777 | |||
778 | INIT_LIST_HEAD(&mport->dma.channels); | ||
779 | |||
780 | for (i = 0; i < nr_channels; i++) { | ||
781 | struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; | ||
782 | |||
783 | if (i == TSI721_DMACH_MAINT) | ||
784 | continue; | ||
785 | |||
786 | bdma_chan->bd_num = 64; | ||
787 | bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); | ||
788 | |||
789 | bdma_chan->dchan.device = &mport->dma; | ||
790 | bdma_chan->dchan.cookie = 1; | ||
791 | bdma_chan->dchan.chan_id = i; | ||
792 | bdma_chan->id = i; | ||
793 | |||
794 | spin_lock_init(&bdma_chan->lock); | ||
795 | |||
796 | INIT_LIST_HEAD(&bdma_chan->active_list); | ||
797 | INIT_LIST_HEAD(&bdma_chan->queue); | ||
798 | INIT_LIST_HEAD(&bdma_chan->free_list); | ||
799 | |||
800 | tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, | ||
801 | (unsigned long)bdma_chan); | ||
802 | tasklet_disable(&bdma_chan->tasklet); | ||
803 | list_add_tail(&bdma_chan->dchan.device_node, | ||
804 | &mport->dma.channels); | ||
805 | } | ||
806 | |||
807 | dma_cap_zero(mport->dma.cap_mask); | ||
808 | dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask); | ||
809 | dma_cap_set(DMA_SLAVE, mport->dma.cap_mask); | ||
810 | |||
811 | mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources; | ||
812 | mport->dma.device_free_chan_resources = tsi721_free_chan_resources; | ||
813 | mport->dma.device_tx_status = tsi721_tx_status; | ||
814 | mport->dma.device_issue_pending = tsi721_issue_pending; | ||
815 | mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg; | ||
816 | mport->dma.device_control = tsi721_device_control; | ||
817 | |||
818 | err = dma_async_device_register(&mport->dma); | ||
819 | if (err) | ||
820 | dev_err(&priv->pdev->dev, "Failed to register DMA device\n"); | ||
821 | |||
822 | return err; | ||
823 | } | ||
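Because the channels are registered with DMA_PRIVATE and DMA_SLAVE set, a RapidIO client obtains one through dma_request_channel() with a filter that matches this mport's DMA device. A minimal sketch under that assumption; the names rio_chan_filter() and rio_client_get_chan() are illustrative and not part of this patch:

/* keep only channels belonging to this mport's embedded DMA device */
static bool rio_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rio_mport *mport = arg;

	return chan->device == &mport->dma;
}

static struct dma_chan *rio_client_get_chan(struct rio_mport *mport)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, rio_chan_filter, mport);
}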