author    Martyn Welch <martyn.welch@ge.com>    2010-02-18 10:13:25 -0500
committer Greg Kroah-Hartman <gregkh@suse.de>   2010-03-03 19:43:01 -0500
commit    4860ab74d4d577d21fbfe0da3bd0925f3efc8907 (patch)
tree      23fa3b8c7b266c3cf851e855efb11a50af4597c6 /drivers/staging/vme
parent    2b82beb8c1bc81b3dde69d16cacbc22546681acf (diff)
Staging: vme: add ca91cx42 dma support

Add support for the DMA controller in the ca91cx42 bridge.

Signed-off-by: Martyn Welch <martyn.welch@ge.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/staging/vme')
-rw-r--r--  drivers/staging/vme/TODO                    |   1
-rw-r--r--  drivers/staging/vme/bridges/Kconfig         |   1
-rw-r--r--  drivers/staging/vme/bridges/vme_ca91cx42.c  | 599
-rw-r--r--  drivers/staging/vme/bridges/vme_ca91cx42.h  |  93
4 files changed, 327 insertions, 367 deletions
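
For context, the sketch below shows how a VME device driver might drive this new DMA path through the list-based interface that the staging VME core layers on top of the bridge callbacks added here. It is illustrative only: the helper names follow the staging VME core (drivers/staging/vme/vme.c and vme_api.txt) of this period, and the DMA resource and PCI buffer mapping are assumed to have been set up elsewhere rather than taken from this patch.

/*
 * Hypothetical consumer sketch (not part of this patch): read "count" bytes
 * from VME A32/D32 space into a DMA-mapped PCI buffer via the list API.
 * "resource" is assumed to be a DMA resource already obtained from the VME
 * core, and "pci_buf" a bus address from dma_map_single()/pci_map_single().
 */
static int example_vme_dma_read(struct vme_resource *resource,
	dma_addr_t pci_buf, unsigned long long vme_addr, size_t count)
{
	struct vme_dma_list *list;
	struct vme_dma_attr *src, *dest;
	int retval;

	list = vme_new_dma_list(resource);
	if (list == NULL)
		return -ENOMEM;

	/* Source: VME bus, A32 address space, single cycles, 32-bit width */
	src = vme_dma_vme_attribute(vme_addr, VME_A32, VME_SCT | VME_DATA,
		VME_D32);
	/* Destination: PCI bus address of the DMA-mapped buffer */
	dest = vme_dma_pci_attribute(pci_buf);
	if ((src == NULL) || (dest == NULL)) {
		retval = -ENOMEM;
		goto out;
	}

	retval = vme_dma_list_add(list, src, dest, count);
	if (retval == 0)
		retval = vme_dma_list_exec(list);	/* blocks until done */

out:
	if (src != NULL)
		vme_dma_free_attribute(src);
	if (dest != NULL)
		vme_dma_free_attribute(dest);
	vme_dma_list_free(list);

	return retval;
}

With this bridge, vme_dma_list_exec() lands in ca91cx42_dma_list_exec() below, which sleeps on bridge->dma_queue until the DGCS register reports the transfer inactive, so the caller must be in process context.
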
diff --git a/drivers/staging/vme/TODO b/drivers/staging/vme/TODO
index 723b67b08b9..f0dba3e4104 100644
--- a/drivers/staging/vme/TODO
+++ b/drivers/staging/vme/TODO
@@ -56,7 +56,6 @@ Tempe (tsi148)
 Universe II (ca91c142)
 ----------------------
 
-- DMA unsupported.
 - RMW transactions unsupported.
 - Mailboxes unsupported.
 - Error Detection.
diff --git a/drivers/staging/vme/bridges/Kconfig b/drivers/staging/vme/bridges/Kconfig
index 66c49f5d8db..9331064e047 100644
--- a/drivers/staging/vme/bridges/Kconfig
+++ b/drivers/staging/vme/bridges/Kconfig
@@ -2,6 +2,7 @@ comment "VME Bridge Drivers"
 
 config VME_CA91CX42
 	tristate "Universe II"
+	depends on VIRT_TO_BUS
 	help
 	  If you say Y here you get support for the Tundra CA91C142
 	  (Universe II) VME bridge chip.
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index aeb11d5f919..eddf071df40 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -592,8 +592,8 @@ err_name:
 }
 
 /*
- * * Free and unmap PCI Resource
- * */
+ * Free and unmap PCI Resource
+ */
 static void ca91cx42_free_resource(struct vme_master_resource *image)
 {
 	iounmap(image->kern_base);
@@ -899,6 +899,261 @@ ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
 	return retval;
 }
 
+int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
+	struct vme_dma_attr *dest, size_t count)
+{
+	struct ca91cx42_dma_entry *entry, *prev;
+	struct vme_dma_pci *pci_attr;
+	struct vme_dma_vme *vme_attr;
+	dma_addr_t desc_ptr;
+	int retval = 0;
+
+	/* XXX descriptor must be aligned on 64-bit boundaries */
+	entry = (struct ca91cx42_dma_entry *)
+		kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
+	if (entry == NULL) {
+		printk(KERN_ERR "Failed to allocate memory for dma resource "
+			"structure\n");
+		retval = -ENOMEM;
+		goto err_mem;
+	}
+
+	/* Test descriptor alignment */
+	if ((unsigned long)&(entry->descriptor) & CA91CX42_DCPP_M) {
+		printk("Descriptor not aligned to 16 byte boundary as "
+			"required: %p\n", &(entry->descriptor));
+		retval = -EINVAL;
+		goto err_align;
+	}
+
+	memset(&(entry->descriptor), 0, sizeof(struct ca91cx42_dma_descriptor));
+
+	if (dest->type == VME_DMA_VME) {
+		entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
+		vme_attr = (struct vme_dma_vme *)dest->private;
+		pci_attr = (struct vme_dma_pci *)src->private;
+	} else {
+		vme_attr = (struct vme_dma_vme *)src->private;
+		pci_attr = (struct vme_dma_pci *)dest->private;
+	}
+
+	/* Check we can do fullfill required attributes */
+	if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
+		VME_USER2)) != 0) {
+
+		printk(KERN_ERR "Unsupported cycle type\n");
+		retval = -EINVAL;
+		goto err_aspace;
+	}
+
+	if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
+		VME_PROG | VME_DATA)) != 0) {
+
+		printk(KERN_ERR "Unsupported cycle type\n");
+		retval = -EINVAL;
+		goto err_cycle;
+	}
+
+	/* Check to see if we can fullfill source and destination */
+	if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
+		((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
+
+		printk(KERN_ERR "Cannot perform transfer with this "
+			"source-destination combination\n");
+		retval = -EINVAL;
+		goto err_direct;
+	}
+
+	/* Setup cycle types */
+	if (vme_attr->cycle & VME_BLT)
+		entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
+
+	/* Setup data width */
+	switch (vme_attr->dwidth) {
+	case VME_D8:
+		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
+		break;
+	case VME_D16:
+		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
+		break;
+	case VME_D32:
+		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
+		break;
+	case VME_D64:
+		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
+		break;
+	default:
+		printk(KERN_ERR "Invalid data width\n");
+		return -EINVAL;
+	}
+
+	/* Setup address space */
+	switch (vme_attr->aspace) {
+	case VME_A16:
+		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
+		break;
+	case VME_A24:
+		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
+		break;
+	case VME_A32:
+		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
+		break;
+	case VME_USER1:
+		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
+		break;
+	case VME_USER2:
+		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
+		break;
+	default:
+		printk(KERN_ERR "Invalid address space\n");
+		return -EINVAL;
+		break;
+	}
+
+	if (vme_attr->cycle & VME_SUPER)
+		entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
+	if (vme_attr->cycle & VME_PROG)
+		entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
+
+	entry->descriptor.dtbc = count;
+	entry->descriptor.dla = pci_attr->address;
+	entry->descriptor.dva = vme_attr->address;
+	entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
+
+	/* Add to list */
+	list_add_tail(&(entry->list), &(list->entries));
+
+	/* Fill out previous descriptors "Next Address" */
+	if (entry->list.prev != &(list->entries)) {
+		prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
+			list);
+		/* We need the bus address for the pointer */
+		desc_ptr = virt_to_bus(&(entry->descriptor));
+		prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
+	}
+
+	return 0;
+
+err_cycle:
+err_aspace:
+err_direct:
+err_align:
+	kfree(entry);
+err_mem:
+	return retval;
+}
+
+static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
+{
+	u32 tmp;
+	struct ca91cx42_driver *bridge;
+
+	bridge = ca91cx42_bridge->driver_priv;
+
+	tmp = ioread32(bridge->base + DGCS);
+
+	if (tmp & CA91CX42_DGCS_ACT)
+		return 0;
+	else
+		return 1;
+}
+
+int ca91cx42_dma_list_exec(struct vme_dma_list *list)
+{
+	struct vme_dma_resource *ctrlr;
+	struct ca91cx42_dma_entry *entry;
+	int retval = 0;
+	dma_addr_t bus_addr;
+	u32 val;
+
+	struct ca91cx42_driver *bridge;
+
+	ctrlr = list->parent;
+
+	bridge = ctrlr->parent->driver_priv;
+
+	mutex_lock(&(ctrlr->mtx));
+
+	if (!(list_empty(&(ctrlr->running)))) {
+		/*
+		 * XXX We have an active DMA transfer and currently haven't
+		 * sorted out the mechanism for "pending" DMA transfers.
+		 * Return busy.
+		 */
+		/* Need to add to pending here */
+		mutex_unlock(&(ctrlr->mtx));
+		return -EBUSY;
+	} else {
+		list_add(&(list->list), &(ctrlr->running));
+	}
+
+	/* Get first bus address and write into registers */
+	entry = list_first_entry(&(list->entries), struct ca91cx42_dma_entry,
+		list);
+
+	bus_addr = virt_to_bus(&(entry->descriptor));
+
+	mutex_unlock(&(ctrlr->mtx));
+
+	iowrite32(0, bridge->base + DTBC);
+	iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
+
+	/* Start the operation */
+	val = ioread32(bridge->base + DGCS);
+
+	/* XXX Could set VMEbus On and Off Counters here */
+	val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);
+
+	val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
+		CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
+		CA91CX42_DGCS_PERR);
+
+	iowrite32(val, bridge->base + DGCS);
+
+	val |= CA91CX42_DGCS_GO;
+
+	iowrite32(val, bridge->base + DGCS);
+
+	wait_event_interruptible(bridge->dma_queue,
+		ca91cx42_dma_busy(ctrlr->parent));
+
+	/*
+	 * Read status register, this register is valid until we kick off a
+	 * new transfer.
+	 */
+	val = ioread32(bridge->base + DGCS);
+
+	if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
+		CA91CX42_DGCS_PERR)) {
+
+		printk(KERN_ERR "ca91c042: DMA Error. DGCS=%08X\n", val);
+		val = ioread32(bridge->base + DCTL);
+	}
+
+	/* Remove list from running list */
+	mutex_lock(&(ctrlr->mtx));
+	list_del(&(list->list));
+	mutex_unlock(&(ctrlr->mtx));
+
+	return retval;
+
+}
+
+int ca91cx42_dma_list_empty(struct vme_dma_list *list)
+{
+	struct list_head *pos, *temp;
+	struct ca91cx42_dma_entry *entry;
+
+	/* detach and free each entry */
+	list_for_each_safe(pos, temp, &(list->entries)) {
+		list_del(pos);
+		entry = list_entry(pos, struct ca91cx42_dma_entry, list);
+		kfree(entry);
+	}
+
+	return 0;
+}
+
 /*
  * All 4 location monitors reside at the same base - this is therefore a
  * system wide configuration.
@@ -1203,9 +1458,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct ca91cx42_driver *ca91cx42_device;
 	struct vme_master_resource *master_image;
 	struct vme_slave_resource *slave_image;
-#if 0
 	struct vme_dma_resource *dma_ctrlr;
-#endif
 	struct vme_lm_resource *lm;
 
 	/* We want to support more than one of each bridge so we need to
@@ -1336,7 +1589,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		list_add_tail(&(slave_image->list),
 			&(ca91cx42_bridge->slave_resources));
 	}
-#if 0
+
 	/* Add dma engines to list */
 	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
 	for (i = 0; i < CA91C142_MAX_DMA; i++) {
@@ -1359,7 +1612,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		list_add_tail(&(dma_ctrlr->list),
 			&(ca91cx42_bridge->dma_resources));
 	}
-#endif
+
 	/* Add location monitor to list */
 	INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
 	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
@@ -1384,10 +1637,10 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	ca91cx42_bridge->master_write = ca91cx42_master_write;
 #if 0
 	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
+#endif
 	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
 	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
 	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
-#endif
 	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
 	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
 	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
@@ -1436,7 +1689,6 @@ err_lm:
 		list_del(pos);
 		kfree(lm);
 	}
-#if 0
 err_dma:
 	/* resources are stored in link list */
 	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
@@ -1444,7 +1696,6 @@ err_dma:
 		list_del(pos);
 		kfree(dma_ctrlr);
 	}
-#endif
 err_slave:
 	/* resources are stored in link list */
 	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
@@ -1575,7 +1826,6 @@ module_exit(ca91cx42_exit);
  *--------------------------------------------------------------------------*/
 
 #if 0
-#define SWIZZLE(X) ( ((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >> 8) | ((X & 0x0000FF00) << 8) | ((X & 0x000000FF) << 24))
 
 int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
 {
@@ -1659,335 +1909,6 @@ int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
 	return 0;
 }
 
-int uniSetupDctlReg(vmeDmaPacket_t * vmeDma, int *dctlregreturn)
-{
-	unsigned int dctlreg = 0x80;
-	struct vmeAttr *vmeAttr;
-
-	if (vmeDma->srcBus == VME_DMA_VME) {
-		dctlreg = 0;
-		vmeAttr = &vmeDma->srcVmeAttr;
-	} else {
-		dctlreg = 0x80000000;
-		vmeAttr = &vmeDma->dstVmeAttr;
-	}
-
-	switch (vmeAttr->maxDataWidth) {
-	case VME_D8:
-		break;
-	case VME_D16:
-		dctlreg |= 0x00400000;
-		break;
-	case VME_D32:
-		dctlreg |= 0x00800000;
-		break;
-	case VME_D64:
-		dctlreg |= 0x00C00000;
-		break;
-	}
-
-	switch (vmeAttr->addrSpace) {
-	case VME_A16:
-		break;
-	case VME_A24:
-		dctlreg |= 0x00010000;
-		break;
-	case VME_A32:
-		dctlreg |= 0x00020000;
-		break;
-	case VME_USER1:
-		dctlreg |= 0x00060000;
-		break;
-	case VME_USER2:
-		dctlreg |= 0x00070000;
-		break;
-
-	case VME_A64:	/* not supported in Universe DMA */
-	case VME_CRCSR:
-	case VME_USER3:
-	case VME_USER4:
-		return -EINVAL;
-		break;
-	}
-	if (vmeAttr->userAccessType == VME_PROG) {
-		dctlreg |= 0x00004000;
-	}
-	if (vmeAttr->dataAccessType == VME_SUPER) {
-		dctlreg |= 0x00001000;
-	}
-	if (vmeAttr->xferProtocol != VME_SCT) {
-		dctlreg |= 0x00000100;
-	}
-	*dctlregreturn = dctlreg;
-	return 0;
-}
-
-unsigned int
-ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
-{
-	unsigned int val;
-
-	/* Setup registers as needed for direct or chained. */
-	if (dgcsreg & 0x8000000) {
-		iowrite32(0, bridge->base + DTBC);
-		iowrite32((unsigned int)vmeLL, bridge->base + DCPP);
-	} else {
-#if 0
-		printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
-		printk(KERN_ERR "Starting: DVA = %08x\n",
-			ioread32(&vmeLL->dva));
-		printk(KERN_ERR "Starting: DLV = %08x\n",
-			ioread32(&vmeLL->dlv));
-		printk(KERN_ERR "Starting: DTBC = %08x\n",
-			ioread32(&vmeLL->dtbc));
-		printk(KERN_ERR "Starting: DCTL = %08x\n",
-			ioread32(&vmeLL->dctl));
-#endif
-		/* Write registers */
-		iowrite32(ioread32(&vmeLL->dva), bridge->base + DVA);
-		iowrite32(ioread32(&vmeLL->dlv), bridge->base + DLA);
-		iowrite32(ioread32(&vmeLL->dtbc), bridge->base + DTBC);
-		iowrite32(ioread32(&vmeLL->dctl), bridge->base + DCTL);
-		iowrite32(0, bridge->base + DCPP);
-	}
-
-	/* Start the operation */
-	iowrite32(dgcsreg, bridge->base + DGCS);
-	val = get_tbl();
-	iowrite32(dgcsreg | 0x8000000F, bridge->base + DGCS);
-	return val;
-}
-
-TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t * vmeDma)
-{
-	vmeDmaPacket_t *vmeCur;
-	int maxPerPage;
-	int currentLLcount;
-	TDMA_Cmd_Packet *startLL;
-	TDMA_Cmd_Packet *currentLL;
-	TDMA_Cmd_Packet *nextLL;
-	unsigned int dctlreg = 0;
-
-	maxPerPage = PAGESIZE / sizeof(TDMA_Cmd_Packet) - 1;
-	startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
-	if (startLL == 0) {
-		return startLL;
-	}
-	/* First allocate pages for descriptors and create linked list */
-	vmeCur = vmeDma;
-	currentLL = startLL;
-	currentLLcount = 0;
-	while (vmeCur != 0) {
-		if (vmeCur->pNextPacket != 0) {
-			currentLL->dcpp = (unsigned int)(currentLL + 1);
-			currentLLcount++;
-			if (currentLLcount >= maxPerPage) {
-				currentLL->dcpp =
-					__get_free_pages(GFP_KERNEL, 0);
-				currentLLcount = 0;
-			}
-			currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
-		} else {
-			currentLL->dcpp = (unsigned int)0;
-		}
-		vmeCur = vmeCur->pNextPacket;
-	}
-
-	/* Next fill in information for each descriptor */
-	vmeCur = vmeDma;
-	currentLL = startLL;
-	while (vmeCur != 0) {
-		if (vmeCur->srcBus == VME_DMA_VME) {
-			iowrite32(vmeCur->srcAddr, &currentLL->dva);
-			iowrite32(vmeCur->dstAddr, &currentLL->dlv);
-		} else {
-			iowrite32(vmeCur->srcAddr, &currentLL->dlv);
-			iowrite32(vmeCur->dstAddr, &currentLL->dva);
-		}
-		uniSetupDctlReg(vmeCur, &dctlreg);
-		iowrite32(dctlreg, &currentLL->dctl);
-		iowrite32(vmeCur->byteCount, &currentLL->dtbc);
-
-		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
-		vmeCur = vmeCur->pNextPacket;
-	}
-
-	/* Convert Links to PCI addresses. */
-	currentLL = startLL;
-	while (currentLL != 0) {
-		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
-		if (nextLL == 0) {
-			iowrite32(1, &currentLL->dcpp);
-		} else {
-			iowrite32((unsigned int)virt_to_bus(nextLL),
-				&currentLL->dcpp);
-		}
-		currentLL = nextLL;
-	}
-
-	/* Return pointer to descriptors list */
-	return startLL;
-}
-
-int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
-{
-	TDMA_Cmd_Packet *currentLL;
-	TDMA_Cmd_Packet *prevLL;
-	TDMA_Cmd_Packet *nextLL;
-	unsigned int dcppreg;
-
-	/* Convert Links to virtual addresses. */
-	currentLL = startLL;
-	while (currentLL != 0) {
-		dcppreg = ioread32(&currentLL->dcpp);
-		dcppreg &= ~6;
-		if (dcppreg & 1) {
-			currentLL->dcpp = 0;
-		} else {
-			currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
-		}
-		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
-	}
-
-	/* Free all pages associated with the descriptors. */
-	currentLL = startLL;
-	prevLL = currentLL;
-	while (currentLL != 0) {
-		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
-		if (currentLL + 1 != nextLL) {
-			free_pages((int)prevLL, 0);
-			prevLL = nextLL;
-		}
-		currentLL = nextLL;
-	}
-
-	/* Return pointer to descriptors list */
-	return 0;
-}
-
-int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
-{
-	unsigned int dgcsreg = 0;
-	unsigned int dctlreg = 0;
-	int val;
-	int channel, x;
-	vmeDmaPacket_t *curDma;
-	TDMA_Cmd_Packet *dmaLL;
-
-	/* Sanity check the VME chain. */
-	channel = vmeDma->channel_number;
-	if (channel > 0) {
-		return -EINVAL;
-	}
-	curDma = vmeDma;
-	while (curDma != 0) {
-		if (curDma->byteCount == 0) {
-			return -EINVAL;
-		}
-		if (curDma->byteCount >= 0x1000000) {
-			return -EINVAL;
-		}
-		if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7)) {
-			return -EINVAL;
-		}
-		switch (curDma->srcBus) {
-		case VME_DMA_PCI:
-			if (curDma->dstBus != VME_DMA_VME) {
-				return -EINVAL;
-			}
-			break;
-		case VME_DMA_VME:
-			if (curDma->dstBus != VME_DMA_PCI) {
-				return -EINVAL;
-			}
-			break;
-		default:
-			return -EINVAL;
-			break;
-		}
-		if (uniSetupDctlReg(curDma, &dctlreg) < 0) {
-			return -EINVAL;
-		}
-
-		curDma = curDma->pNextPacket;
-		if (curDma == vmeDma) {	/* Endless Loop! */
-			return -EINVAL;
-		}
-	}
-
-	/* calculate control register */
-	if (vmeDma->pNextPacket != 0) {
-		dgcsreg = 0x8000000;
-	} else {
-		dgcsreg = 0;
-	}
-
-	for (x = 0; x < 8; x++) {	/* vme block size */
-		if ((256 << x) >= vmeDma->maxVmeBlockSize) {
-			break;
-		}
-	}
-	if (x == 8)
-		x = 7;
-	dgcsreg |= (x << 20);
-
-	if (vmeDma->vmeBackOffTimer) {
-		for (x = 1; x < 8; x++) {	/* vme timer */
-			if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
-				break;
-			}
-		}
-		if (x == 8)
-			x = 7;
-		dgcsreg |= (x << 16);
-	}
-	/*` Setup the dma chain */
-	dmaLL = ca91cx42_setup_dma(vmeDma);
-
-	/* Start the DMA */
-	if (dgcsreg & 0x8000000) {
-		vmeDma->vmeDmaStartTick =
-			ca91cx42_start_dma(channel, dgcsreg,
-				(TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
-	} else {
-		vmeDma->vmeDmaStartTick =
-			ca91cx42_start_dma(channel, dgcsreg, dmaLL);
-	}
-
-	wait_event_interruptible(dma_queue,
-		ioread32(bridge->base + DGCS) & 0x800);
-
-	val = ioread32(bridge->base + DGCS);
-	iowrite32(val | 0xF00, bridge->base + DGCS);
-
-	vmeDma->vmeDmaStatus = 0;
-
-	if (!(val & 0x00000800)) {
-		vmeDma->vmeDmaStatus = val & 0x700;
-		printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
-			" DGCS=%08X\n", val);
-		val = ioread32(bridge->base + DCPP);
-		printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
-		val = ioread32(bridge->base + DCTL);
-		printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
-		val = ioread32(bridge->base + DTBC);
-		printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
-		val = ioread32(bridge->base + DLA);
-		printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
-		val = ioread32(bridge->base + DVA);
-		printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
-
-	}
-	/* Free the dma chain */
-	ca91cx42_free_dma(dmaLL);
-
-	return 0;
-}
-
-
-
-
-
 int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
 {
 	int temp_ctl = 0;
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.h b/drivers/staging/vme/bridges/vme_ca91cx42.h
index b9b63088668..221d20f0b92 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.h
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.h
@@ -57,7 +57,7 @@ struct ca91cx42_driver {
 struct ca91cx42_dma_descriptor {
 	unsigned int dctl;	/* DMA Control */
 	unsigned int dtbc;	/* Transfer Byte Count */
-	unsigned int dlv;	/* PCI Address */
+	unsigned int dla;	/* PCI Address */
 	unsigned int res1;	/* Reserved */
 	unsigned int dva;	/* Vme Address */
 	unsigned int res2;	/* Reserved */
@@ -253,32 +253,6 @@ static const int CA91CX42_VSI_TO[] = { VSI0_TO, VSI1_TO, VSI2_TO, VSI3_TO,
 #define VCSR_SET	0x0FF8
 #define VCSR_BS		0x0FFC
 
-// DMA General Control/Status Register DGCS (0x220)
-// 32-24 || GO | STOPR | HALTR | 0 || CHAIN | 0 | 0 | 0 ||
-// 23-16 || VON || VOFF ||
-// 15-08 || ACT | STOP | HALT | 0 || DONE | LERR | VERR | P_ERR ||
-// 07-00 || 0 | INT_S | INT_H | 0 || I_DNE | I_LER | I_VER | I_PER ||
-
-// VON - Length Per DMA VMEBus Transfer
-// 0000 = None
-// 0001 = 256 Bytes
-// 0010 = 512
-// 0011 = 1024
-// 0100 = 2048
-// 0101 = 4096
-// 0110 = 8192
-// 0111 = 16384
-
-// VOFF - wait between DMA tenures
-// 0000 = 0 us
-// 0001 = 16
-// 0010 = 32
-// 0011 = 64
-// 0100 = 128
-// 0101 = 256
-// 0110 = 512
-// 0111 = 1024
-
 /*
  * PCI Class Register
  * offset 008
@@ -371,6 +345,71 @@ static const int CA91CX42_VSI_TO[] = { VSI0_TO, VSI1_TO, VSI2_TO, VSI3_TO,
 #define CA91CX42_BM_SLSI_RESERVED	0x3F0F0000
 
 /*
+ * DCTL Register
+ * offset 200
+ */
+#define CA91CX42_DCTL_L2V		(1<<31)
+#define CA91CX42_DCTL_VDW_M		(3<<22)
+#define CA91CX42_DCTL_VDW_M		(3<<22)
+#define CA91CX42_DCTL_VDW_D8		0
+#define CA91CX42_DCTL_VDW_D16		(1<<22)
+#define CA91CX42_DCTL_VDW_D32		(1<<23)
+#define CA91CX42_DCTL_VDW_D64		(3<<22)
+
+#define CA91CX42_DCTL_VAS_M		(7<<16)
+#define CA91CX42_DCTL_VAS_A16		0
+#define CA91CX42_DCTL_VAS_A24		(1<<16)
+#define CA91CX42_DCTL_VAS_A32		(1<<17)
+#define CA91CX42_DCTL_VAS_USER1		(3<<17)
+#define CA91CX42_DCTL_VAS_USER2		(7<<16)
+
+#define CA91CX42_DCTL_PGM_M		(1<<14)
+#define CA91CX42_DCTL_PGM_DATA		0
+#define CA91CX42_DCTL_PGM_PGM		(1<<14)
+
+#define CA91CX42_DCTL_SUPER_M		(1<<12)
+#define CA91CX42_DCTL_SUPER_NPRIV	0
+#define CA91CX42_DCTL_SUPER_SUPR	(1<<12)
+
+#define CA91CX42_DCTL_VCT_M		(1<<8)
+#define CA91CX42_DCTL_VCT_BLT		(1<<8)
+#define CA91CX42_DCTL_LD64EN		(1<<7)
+
+/*
+ * DCPP Register
+ * offset 218
+ */
+#define CA91CX42_DCPP_M			0xf
+#define CA91CX42_DCPP_NULL		(1<<0)
+
+/*
+ * DMA General Control/Status Register (DGCS)
+ * offset 220
+ */
+#define CA91CX42_DGCS_GO		(1<<31)
+#define CA91CX42_DGCS_STOP_REQ		(1<<30)
+#define CA91CX42_DGCS_HALT_REQ		(1<<29)
+#define CA91CX42_DGCS_CHAIN		(1<<27)
+
+#define CA91CX42_DGCS_VON_M		(7<<20)
+
+#define CA91CX42_DGCS_VOFF_M		(0xf<<16)
+
+#define CA91CX42_DGCS_ACT		(1<<15)
+#define CA91CX42_DGCS_STOP		(1<<14)
+#define CA91CX42_DGCS_HALT		(1<<13)
+#define CA91CX42_DGCS_DONE		(1<<11)
+#define CA91CX42_DGCS_LERR		(1<<10)
+#define CA91CX42_DGCS_VERR		(1<<9)
+#define CA91CX42_DGCS_PERR		(1<<8)
+#define CA91CX42_DGCS_INT_STOP		(1<<6)
+#define CA91CX42_DGCS_INT_HALT		(1<<5)
+#define CA91CX42_DGCS_INT_DONE		(1<<3)
+#define CA91CX42_DGCS_INT_LERR		(1<<2)
+#define CA91CX42_DGCS_INT_VERR		(1<<1)
+#define CA91CX42_DGCS_INT_PERR		(1<<0)
+
+/*
  * PCI Interrupt Enable Register
  * offset 300
  */
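
For reference, a minimal sketch (not part of the patch) of how the DCTL bits above compose for one descriptor: the value ca91cx42_dma_list_add() would build for a PCI-to-VME, A32, D32, supervisory block transfer.

	/* 0x80000000 | 0x00800000 | 0x00020000 | 0x00001000 | 0x00000100 = 0x80821100 */
	u32 dctl = CA91CX42_DCTL_L2V |		/* local (PCI) to VME direction */
		   CA91CX42_DCTL_VDW_D32 |	/* 32-bit VME data width */
		   CA91CX42_DCTL_VAS_A32 |	/* A32 address space */
		   CA91CX42_DCTL_SUPER_SUPR |	/* supervisory access */
		   CA91CX42_DCTL_VCT_BLT;	/* VMEbus block transfer */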