Diffstat (limited to 'arch/arm/mach-bcmring/dma.c')
-rw-r--r-- | arch/arm/mach-bcmring/dma.c | 812
1 file changed, 0 insertions, 812 deletions
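The diff below removes the driver's hand-rolled DMA memory-map layer in one go: the /proc/dma/mem-type statistics file, the per-type allocation counters behind it, and the whole dma_map_*/dma_unmap API. For orientation, these are the public entry points deleted by the final hunk, with signatures copied from the removed code (DMA_MemMap_t and the related typedefs presumably come from the included <mach/dma.h>):

    int dma_init_mem_map(DMA_MemMap_t *memMap);
    int dma_term_mem_map(DMA_MemMap_t *memMap);
    DMA_MemType_t dma_mem_type(void *addr);
    int dma_mem_supports_dma(void *addr);
    int dma_map_start(DMA_MemMap_t *memMap, enum dma_data_direction dir);
    int dma_map_add_region(DMA_MemMap_t *memMap, void *mem, size_t numBytes);
    int dma_map_mem(DMA_MemMap_t *memMap, void *mem, size_t numBytes,
                    enum dma_data_direction dir);
    int dma_map_create_descriptor_ring(DMA_Device_t dev, DMA_MemMap_t *memMap,
                                       dma_addr_t devPhysAddr);
    int dma_unmap(DMA_MemMap_t *memMap, int dirtied);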
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 1a1a27dd5654..1024396797e1 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -33,17 +33,11 @@
 
 #include <mach/timer.h>
 
-#include <linux/mm.h>
 #include <linux/pfn.h>
 #include <linux/atomic.h>
 #include <linux/sched.h>
 #include <mach/dma.h>
 
-/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
-/* especially since dc4 doesn't use kmalloc'd memory. */
-
-#define ALLOW_MAP_OF_KMALLOC_MEMORY 0
-
 /* ---- Public Variables ------------------------------------------------- */
 
 /* ---- Private Constants and Types -------------------------------------- */
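Note that ALLOW_MAP_OF_KMALLOC_MEMORY was already hard-wired to 0, so every path guarded by it further down (in dma_mem_supports_dma, dma_map_add_region and dma_unmap) was dead code long before this removal. With the flag at 0, for example, the test in dma_mem_supports_dma reduced to:

    /* the #if ALLOW_MAP_OF_KMALLOC_MEMORY branch is elided by the preprocessor */
    return (memType == DMA_MEM_TYPE_DMA)
        || (memType == DMA_MEM_TYPE_USER);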
@@ -53,24 +47,12 @@
 #define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f)
 #define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f)
 
-#define DMA_MAP_DEBUG 0
-
-#if DMA_MAP_DEBUG
-#   define DMA_MAP_PRINT(fmt, args...) printk("%s: " fmt, __func__, ## args)
-#else
-#   define DMA_MAP_PRINT(fmt, args...)
-#endif
 
 /* ---- Private Variables ------------------------------------------------ */
 
 static DMA_Global_t gDMA;
 static struct proc_dir_entry *gDmaDir;
 
-static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
-
 #include "dma_device.c"
 
 /* ---- Private Function Prototypes -------------------------------------- */
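The DMA_MAP_PRINT macro deleted above was the usual compile-time trace switch: with DMA_MAP_DEBUG set to 1, a call site such as the one in dma_map_start expanded to a printk prefixed with the calling function's name, and with the default of 0 every call site compiled away to nothing:

    DMA_MAP_PRINT("memMap: %p\n", memMap);
    /* with DMA_MAP_DEBUG == 1 this expands to: */
    printk("%s: " "memMap: %p\n", __func__, memMap);  /* "dma_map_start: memMap: ..." */
    /* with DMA_MAP_DEBUG == 0 it expands to nothing at all */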
@@ -79,34 +61,6 @@ static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
 
 /****************************************************************************/
 /**
-*   Displays information for /proc/dma/mem-type
-*/
-/****************************************************************************/
-
-static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
-				  int count, int *eof, void *data)
-{
-	int len = 0;
-
-	len += sprintf(buf + len, "dma_map_mem statistics\n");
-	len +=
-	    sprintf(buf + len, "coherent: %d\n",
-		    atomic_read(&gDmaStatMemTypeCoherent));
-	len +=
-	    sprintf(buf + len, "kmalloc: %d\n",
-		    atomic_read(&gDmaStatMemTypeKmalloc));
-	len +=
-	    sprintf(buf + len, "vmalloc: %d\n",
-		    atomic_read(&gDmaStatMemTypeVmalloc));
-	len +=
-	    sprintf(buf + len, "user: %d\n",
-		    atomic_read(&gDmaStatMemTypeUser));
-
-	return len;
-}
-
-/****************************************************************************/
-/**
 *   Displays information for /proc/dma/channels
 */
 /****************************************************************************/
@@ -846,8 +800,6 @@ int dma_init(void)
 				       dma_proc_read_channels, NULL);
 		create_proc_read_entry("devices", 0, gDmaDir,
 				       dma_proc_read_devices, NULL);
-		create_proc_read_entry("mem-type", 0, gDmaDir,
-				       dma_proc_read_mem_type, NULL);
 	}
 
 out:
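This hunk and the dma_proc_read_mem_type hunk above together retire the /proc/dma/mem-type file. Going by the sprintf format strings in the deleted read handler, reading it produced output of this shape (the counts here are purely illustrative):

    $ cat /proc/dma/mem-type
    dma_map_mem statistics
    coherent: 12
    kmalloc: 0
    vmalloc: 3
    user: 47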
@@ -1565,767 +1517,3 @@ int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for. */
 }
 
 EXPORT_SYMBOL(dma_set_device_handler);
-
-/****************************************************************************/
-/**
-*   Initializes a memory mapping structure
-*/
-/****************************************************************************/
-
-int dma_init_mem_map(DMA_MemMap_t *memMap)
-{
-	memset(memMap, 0, sizeof(*memMap));
-
-	sema_init(&memMap->lock, 1);
-
-	return 0;
-}
-
-EXPORT_SYMBOL(dma_init_mem_map);
-
-/****************************************************************************/
-/**
-*   Releases any memory currently being held by a memory mapping structure.
-*/
-/****************************************************************************/
-
-int dma_term_mem_map(DMA_MemMap_t *memMap)
-{
-	down(&memMap->lock);	/* Just being paranoid */
-
-	/* Free up any allocated memory */
-
-	up(&memMap->lock);
-	memset(memMap, 0, sizeof(*memMap));
-
-	return 0;
-}
-
-EXPORT_SYMBOL(dma_term_mem_map);
-
-/****************************************************************************/
-/**
-*   Looks at a memory address and categorizes it.
-*
-*   @return One of the values from the DMA_MemType_t enumeration.
-*/
-/****************************************************************************/
-
-DMA_MemType_t dma_mem_type(void *addr)
-{
-	unsigned long addrVal = (unsigned long)addr;
-
-	if (addrVal >= CONSISTENT_BASE) {
-		/* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
-
-		/* dma_alloc_xxx pages are physically and virtually contiguous */
-
-		return DMA_MEM_TYPE_DMA;
-	}
-
-	/* Technically, we could add one more classification. Addresses between VMALLOC_END */
-	/* and the beginning of the DMA virtual address could be considered to be I/O space. */
-	/* Right now, nobody cares about this particular classification, so we ignore it. */
-
-	if (is_vmalloc_addr(addr)) {
-		/* Address comes from the vmalloc'd region. Pages are virtually */
-		/* contiguous but NOT physically contiguous */
-
-		return DMA_MEM_TYPE_VMALLOC;
-	}
-
-	if (addrVal >= PAGE_OFFSET) {
-		/* PAGE_OFFSET is typically 0xC0000000 */
-
-		/* kmalloc'd pages are physically contiguous */
-
-		return DMA_MEM_TYPE_KMALLOC;
-	}
-
-	return DMA_MEM_TYPE_USER;
-}
-
-EXPORT_SYMBOL(dma_mem_type);
-
-/****************************************************************************/
-/**
-*   Looks at a memory address and determines if we support DMA'ing to/from
-*   that type of memory.
-*
-*   @return boolean -
-*       return value != 0 means dma supported
-*       return value == 0 means dma not supported
-*/
-/****************************************************************************/
-
-int dma_mem_supports_dma(void *addr)
-{
-	DMA_MemType_t memType = dma_mem_type(addr);
-
-	return (memType == DMA_MEM_TYPE_DMA)
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
-	    || (memType == DMA_MEM_TYPE_KMALLOC)
-#endif
-	    || (memType == DMA_MEM_TYPE_USER);
-}
-
-EXPORT_SYMBOL(dma_mem_supports_dma);
-
-/****************************************************************************/
-/**
-*   Maps in a memory region such that it can be used for performing a DMA.
-*
-*   @return
-*/
-/****************************************************************************/
-
-int dma_map_start(DMA_MemMap_t *memMap,	/* Stores state information about the map */
-		  enum dma_data_direction dir	/* Direction that the mapping will be going */
-    ) {
-	int rc;
-
-	down(&memMap->lock);
-
-	DMA_MAP_PRINT("memMap: %p\n", memMap);
-
-	if (memMap->inUse) {
-		printk(KERN_ERR "%s: memory map %p is already being used\n",
-		       __func__, memMap);
-		rc = -EBUSY;
-		goto out;
-	}
-
-	memMap->inUse = 1;
-	memMap->dir = dir;
-	memMap->numRegionsUsed = 0;
-
-	rc = 0;
-
-out:
-
-	DMA_MAP_PRINT("returning %d", rc);
-
-	up(&memMap->lock);
-
-	return rc;
-}
-
-EXPORT_SYMBOL(dma_map_start);
-
-/****************************************************************************/
-/**
-*   Adds a segment of memory to a memory map. Each segment is both
-*   physically and virtually contiguous.
-*
-*   @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-static int dma_map_add_segment(DMA_MemMap_t *memMap,	/* Stores state information about the map */
-			       DMA_Region_t *region,	/* Region that the segment belongs to */
-			       void *virtAddr,	/* Virtual address of the segment being added */
-			       dma_addr_t physAddr,	/* Physical address of the segment being added */
-			       size_t numBytes	/* Number of bytes of the segment being added */
-    ) {
-	DMA_Segment_t *segment;
-
-	DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr,
-		      physAddr, numBytes);
-
-	/* Sanity check */
-
-	if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
-	    || (((unsigned long)virtAddr + numBytes)) >
-	    ((unsigned long)region->virtAddr + region->numBytes)) {
-		printk(KERN_ERR
-		       "%s: virtAddr %p is outside region @ %p len: %d\n",
-		       __func__, virtAddr, region->virtAddr, region->numBytes);
-		return -EINVAL;
-	}
-
-	if (region->numSegmentsUsed > 0) {
-		/* Check to see if this segment is physically contiguous with the previous one */
-
-		segment = &region->segment[region->numSegmentsUsed - 1];
-
-		if ((segment->physAddr + segment->numBytes) == physAddr) {
-			/* It is - just add on to the end */
-
-			DMA_MAP_PRINT("appending %d bytes to last segment\n",
-				      numBytes);
-
-			segment->numBytes += numBytes;
-
-			return 0;
-		}
-	}
-
-	/* Reallocate to hold more segments, if required. */
-
-	if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
-		DMA_Segment_t *newSegment;
-		size_t oldSize =
-		    region->numSegmentsAllocated * sizeof(*newSegment);
-		int newAlloc = region->numSegmentsAllocated + 4;
-		size_t newSize = newAlloc * sizeof(*newSegment);
-
-		newSegment = kmalloc(newSize, GFP_KERNEL);
-		if (newSegment == NULL) {
-			return -ENOMEM;
-		}
-		memcpy(newSegment, region->segment, oldSize);
-		memset(&((uint8_t *) newSegment)[oldSize], 0,
-		       newSize - oldSize);
-		kfree(region->segment);
-
-		region->numSegmentsAllocated = newAlloc;
-		region->segment = newSegment;
-	}
-
-	segment = &region->segment[region->numSegmentsUsed];
-	region->numSegmentsUsed++;
-
-	segment->virtAddr = virtAddr;
-	segment->physAddr = physAddr;
-	segment->numBytes = numBytes;
-
-	DMA_MAP_PRINT("returning success\n");
-
-	return 0;
-}
-
-/****************************************************************************/
-/**
-*   Adds a region of memory to a memory map. Each region is virtually
-*   contiguous, but not necessarily physically contiguous.
-*
-*   @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_add_region(DMA_MemMap_t *memMap,	/* Stores state information about the map */
-		       void *mem,	/* Virtual address that we want to get a map of */
-		       size_t numBytes	/* Number of bytes being mapped */
-    ) {
-	unsigned long addr = (unsigned long)mem;
-	unsigned int offset;
-	int rc = 0;
-	DMA_Region_t *region;
-	dma_addr_t physAddr;
-
-	down(&memMap->lock);
-
-	DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes);
-
-	if (!memMap->inUse) {
-		printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
-		       __func__);
-		rc = -EINVAL;
-		goto out;
-	}
-
-	/* Reallocate to hold more regions. */
-
-	if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
-		DMA_Region_t *newRegion;
-		size_t oldSize =
-		    memMap->numRegionsAllocated * sizeof(*newRegion);
-		int newAlloc = memMap->numRegionsAllocated + 4;
-		size_t newSize = newAlloc * sizeof(*newRegion);
-
-		newRegion = kmalloc(newSize, GFP_KERNEL);
-		if (newRegion == NULL) {
-			rc = -ENOMEM;
-			goto out;
-		}
-		memcpy(newRegion, memMap->region, oldSize);
-		memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
-
-		kfree(memMap->region);
-
-		memMap->numRegionsAllocated = newAlloc;
-		memMap->region = newRegion;
-	}
-
-	region = &memMap->region[memMap->numRegionsUsed];
-	memMap->numRegionsUsed++;
-
-	offset = addr & ~PAGE_MASK;
-
-	region->memType = dma_mem_type(mem);
-	region->virtAddr = mem;
-	region->numBytes = numBytes;
-	region->numSegmentsUsed = 0;
-	region->numLockedPages = 0;
-	region->lockedPages = NULL;
-
-	switch (region->memType) {
-	case DMA_MEM_TYPE_VMALLOC:
-		{
-			atomic_inc(&gDmaStatMemTypeVmalloc);
-
-			/* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
-
-			/* vmalloc'd pages are not physically contiguous */
-
-			rc = -EINVAL;
-			break;
-		}
-
-	case DMA_MEM_TYPE_KMALLOC:
-		{
-			atomic_inc(&gDmaStatMemTypeKmalloc);
-
-			/* kmalloc'd pages are physically contiguous, so they'll have exactly */
-			/* one segment */
-
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
-			physAddr =
-			    dma_map_single(NULL, mem, numBytes, memMap->dir);
-			rc = dma_map_add_segment(memMap, region, mem, physAddr,
-						 numBytes);
-#else
-			rc = -EINVAL;
-#endif
-			break;
-		}
-
-	case DMA_MEM_TYPE_DMA:
-		{
-			/* dma_alloc_xxx pages are physically contiguous */
-
-			atomic_inc(&gDmaStatMemTypeCoherent);
-
-			physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
-
-			dma_sync_single_for_cpu(NULL, physAddr, numBytes,
-						memMap->dir);
-			rc = dma_map_add_segment(memMap, region, mem, physAddr,
-						 numBytes);
-			break;
-		}
-
-	case DMA_MEM_TYPE_USER:
-		{
-			size_t firstPageOffset;
-			size_t firstPageSize;
-			struct page **pages;
-			struct task_struct *userTask;
-
-			atomic_inc(&gDmaStatMemTypeUser);
-
-#if 1
-			/* If the pages are user pages, then the dma_mem_map_set_user_task function */
-			/* must have been previously called. */
-
-			if (memMap->userTask == NULL) {
-				printk(KERN_ERR
-				       "%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
-				       __func__);
-				return -EINVAL;
-			}
-
-			/* User pages need to be locked. */
-
-			firstPageOffset =
-			    (unsigned long)region->virtAddr & (PAGE_SIZE - 1);
-			firstPageSize = PAGE_SIZE - firstPageOffset;
-
-			region->numLockedPages = (firstPageOffset
-						  + region->numBytes +
-						  PAGE_SIZE - 1) / PAGE_SIZE;
-			pages =
-			    kmalloc(region->numLockedPages *
-				    sizeof(struct page *), GFP_KERNEL);
-
-			if (pages == NULL) {
-				region->numLockedPages = 0;
-				return -ENOMEM;
-			}
-
-			userTask = memMap->userTask;
-
-			down_read(&userTask->mm->mmap_sem);
-			rc = get_user_pages(userTask,	/* task */
-					    userTask->mm,	/* mm */
-					    (unsigned long)region->virtAddr,	/* start */
-					    region->numLockedPages,	/* len */
-					    memMap->dir == DMA_FROM_DEVICE,	/* write */
-					    0,	/* force */
-					    pages,	/* pages (array of pointers to page) */
-					    NULL);	/* vmas */
-			up_read(&userTask->mm->mmap_sem);
-
-			if (rc != region->numLockedPages) {
-				kfree(pages);
-				region->numLockedPages = 0;
-
-				if (rc >= 0) {
-					rc = -EINVAL;
-				}
-			} else {
-				uint8_t *virtAddr = region->virtAddr;
-				size_t bytesRemaining;
-				int pageIdx;
-
-				rc = 0;	/* Since get_user_pages returns +ve number */
-
-				region->lockedPages = pages;
-
-				/* We've locked the user pages. Now we need to walk them and figure */
-				/* out the physical addresses. */
-
-				/* The first page may be partial */
-
-				dma_map_add_segment(memMap,
-						    region,
-						    virtAddr,
-						    PFN_PHYS(page_to_pfn
-							     (pages[0])) +
-						    firstPageOffset,
-						    firstPageSize);
-
-				virtAddr += firstPageSize;
-				bytesRemaining =
-				    region->numBytes - firstPageSize;
-
-				for (pageIdx = 1;
-				     pageIdx < region->numLockedPages;
-				     pageIdx++) {
-					size_t bytesThisPage =
-					    (bytesRemaining >
-					     PAGE_SIZE ? PAGE_SIZE :
-					     bytesRemaining);
-
-					DMA_MAP_PRINT
-					    ("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n",
-					     pageIdx, pages[pageIdx],
-					     page_to_pfn(pages[pageIdx]),
-					     PFN_PHYS(page_to_pfn
-						      (pages[pageIdx])));
-
-					dma_map_add_segment(memMap,
-							    region,
-							    virtAddr,
-							    PFN_PHYS(page_to_pfn
-								     (pages
-								      [pageIdx])),
-							    bytesThisPage);
-
-					virtAddr += bytesThisPage;
-					bytesRemaining -= bytesThisPage;
-				}
-			}
-#else
-			printk(KERN_ERR
-			       "%s: User mode pages are not yet supported\n",
-			       __func__);
-
-			/* user pages are not physically contiguous */
-
-			rc = -EINVAL;
-#endif
-			break;
-		}
-
-	default:
-		{
-			printk(KERN_ERR "%s: Unsupported memory type: %d\n",
-			       __func__, region->memType);
-
-			rc = -EINVAL;
-			break;
-		}
-	}
-
-	if (rc != 0) {
-		memMap->numRegionsUsed--;
-	}
-
-out:
-
-	DMA_MAP_PRINT("returning %d\n", rc);
-
-	up(&memMap->lock);
-
-	return rc;
-}
-
-EXPORT_SYMBOL(dma_map_add_segment);
-
-/****************************************************************************/
-/**
-*   Maps in a memory region such that it can be used for performing a DMA.
-*
-*   @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_mem(DMA_MemMap_t *memMap,	/* Stores state information about the map */
-		void *mem,	/* Virtual address that we want to get a map of */
-		size_t numBytes,	/* Number of bytes being mapped */
-		enum dma_data_direction dir	/* Direction that the mapping will be going */
-    ) {
-	int rc;
-
-	rc = dma_map_start(memMap, dir);
-	if (rc == 0) {
-		rc = dma_map_add_region(memMap, mem, numBytes);
-		if (rc < 0) {
-			/* Since the add fails, this function will fail, and the caller won't */
-			/* call unmap, so we need to do it here. */
-
-			dma_unmap(memMap, 0);
-		}
-	}
-
-	return rc;
-}
-
-EXPORT_SYMBOL(dma_map_mem);
-
-/****************************************************************************/
-/**
-*   Setup a descriptor ring for a given memory map.
-*
-*   It is assumed that the descriptor ring has already been initialized, and
-*   this routine will only reallocate a new descriptor ring if the existing
-*   one is too small.
-*
-*   @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_create_descriptor_ring(DMA_Device_t dev,	/* DMA device (where the ring is stored) */
-				   DMA_MemMap_t *memMap,	/* Memory map that will be used */
-				   dma_addr_t devPhysAddr	/* Physical address of device */
-    ) {
-	int rc;
-	int numDescriptors;
-	DMA_DeviceAttribute_t *devAttr;
-	DMA_Region_t *region;
-	DMA_Segment_t *segment;
-	dma_addr_t srcPhysAddr;
-	dma_addr_t dstPhysAddr;
-	int regionIdx;
-	int segmentIdx;
-
-	devAttr = &DMA_gDeviceAttribute[dev];
-
-	down(&memMap->lock);
-
-	/* Figure out how many descriptors we need */
-
-	numDescriptors = 0;
-	for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
-		region = &memMap->region[regionIdx];
-
-		for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
-		     segmentIdx++) {
-			segment = &region->segment[segmentIdx];
-
-			if (memMap->dir == DMA_TO_DEVICE) {
-				srcPhysAddr = segment->physAddr;
-				dstPhysAddr = devPhysAddr;
-			} else {
-				srcPhysAddr = devPhysAddr;
-				dstPhysAddr = segment->physAddr;
-			}
-
-			rc =
-			    dma_calculate_descriptor_count(dev, srcPhysAddr,
-							   dstPhysAddr,
-							   segment->
-							   numBytes);
-			if (rc < 0) {
-				printk(KERN_ERR
-				       "%s: dma_calculate_descriptor_count failed: %d\n",
-				       __func__, rc);
-				goto out;
-			}
-			numDescriptors += rc;
-		}
-	}
-
-	/* Adjust the size of the ring, if it isn't big enough */
-
-	if (numDescriptors > devAttr->ring.descriptorsAllocated) {
-		dma_free_descriptor_ring(&devAttr->ring);
-		rc =
-		    dma_alloc_descriptor_ring(&devAttr->ring,
-					      numDescriptors);
-		if (rc < 0) {
-			printk(KERN_ERR
-			       "%s: dma_alloc_descriptor_ring failed: %d\n",
-			       __func__, rc);
-			goto out;
-		}
-	} else {
-		rc =
-		    dma_init_descriptor_ring(&devAttr->ring,
-					     numDescriptors);
-		if (rc < 0) {
-			printk(KERN_ERR
-			       "%s: dma_init_descriptor_ring failed: %d\n",
-			       __func__, rc);
-			goto out;
-		}
-	}
-
-	/* Populate the descriptors */
-
-	for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
-		region = &memMap->region[regionIdx];
-
-		for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
-		     segmentIdx++) {
-			segment = &region->segment[segmentIdx];
-
-			if (memMap->dir == DMA_TO_DEVICE) {
-				srcPhysAddr = segment->physAddr;
-				dstPhysAddr = devPhysAddr;
-			} else {
-				srcPhysAddr = devPhysAddr;
-				dstPhysAddr = segment->physAddr;
-			}
-
-			rc =
-			    dma_add_descriptors(&devAttr->ring, dev,
-						srcPhysAddr, dstPhysAddr,
-						segment->numBytes);
-			if (rc < 0) {
-				printk(KERN_ERR
-				       "%s: dma_add_descriptors failed: %d\n",
-				       __func__, rc);
-				goto out;
-			}
-		}
-	}
-
-	rc = 0;
-
-out:
-
-	up(&memMap->lock);
-	return rc;
-}
-
-EXPORT_SYMBOL(dma_map_create_descriptor_ring);
-
-/****************************************************************************/
-/**
-*   Unmaps a memory region that was previously mapped for performing a DMA.
-*
-*   @return
-*/
-/****************************************************************************/
-
-int dma_unmap(DMA_MemMap_t *memMap,	/* Stores state information about the map */
-	      int dirtied	/* non-zero if any of the pages were modified */
-    ) {
-
-	int rc = 0;
-	int regionIdx;
-	int segmentIdx;
-	DMA_Region_t *region;
-	DMA_Segment_t *segment;
-
-	down(&memMap->lock);
-
-	for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
-		region = &memMap->region[regionIdx];
-
-		for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
-		     segmentIdx++) {
-			segment = &region->segment[segmentIdx];
-
-			switch (region->memType) {
-			case DMA_MEM_TYPE_VMALLOC:
-				{
-					printk(KERN_ERR
-					       "%s: vmalloc'd pages are not yet supported\n",
-					       __func__);
-					rc = -EINVAL;
-					goto out;
-				}
-
-			case DMA_MEM_TYPE_KMALLOC:
-				{
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
-					dma_unmap_single(NULL,
-							 segment->physAddr,
-							 segment->numBytes,
-							 memMap->dir);
-#endif
-					break;
-				}
-
-			case DMA_MEM_TYPE_DMA:
-				{
-					dma_sync_single_for_cpu(NULL,
-								segment->
-								physAddr,
-								segment->
-								numBytes,
-								memMap->dir);
-					break;
-				}
-
-			case DMA_MEM_TYPE_USER:
-				{
-					/* Nothing to do here. */
-
-					break;
-				}
-
-			default:
-				{
-					printk(KERN_ERR
-					       "%s: Unsupported memory type: %d\n",
-					       __func__, region->memType);
-					rc = -EINVAL;
-					goto out;
-				}
-			}
-
-			segment->virtAddr = NULL;
-			segment->physAddr = 0;
-			segment->numBytes = 0;
-		}
-
-		if (region->numLockedPages > 0) {
-			int pageIdx;
-
-			/* Some user pages were locked. We need to go and unlock them now. */
-
-			for (pageIdx = 0; pageIdx < region->numLockedPages;
-			     pageIdx++) {
-				struct page *page =
-				    region->lockedPages[pageIdx];
-
-				if (memMap->dir == DMA_FROM_DEVICE) {
-					SetPageDirty(page);
-				}
-				page_cache_release(page);
-			}
-			kfree(region->lockedPages);
-			region->numLockedPages = 0;
-			region->lockedPages = NULL;
-		}
-
-		region->memType = DMA_MEM_TYPE_NONE;
-		region->virtAddr = NULL;
-		region->numBytes = 0;
-		region->numSegmentsUsed = 0;
-	}
-	memMap->userTask = NULL;
-	memMap->numRegionsUsed = 0;
-	memMap->inUse = 0;
-
-out:
-	up(&memMap->lock);
-
-	return rc;
-}
-
-EXPORT_SYMBOL(dma_unmap);
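For the record, a typical client drove the removed layer through the lifecycle sketched below. This is a minimal illustration assembled from the deleted signatures, not code from the tree; the device ID, buffer, and device FIFO address are hypothetical placeholders. It also shows why dma_map_mem existed: it bundles dma_map_start and dma_map_add_region and calls dma_unmap itself when the add fails, so the caller only unmaps on success.

    /* Minimal sketch of the removed API's intended use; dev, devFifoPhys,
     * buf and len are hypothetical, and error handling is kept equally
     * minimal. */
    static int example_xfer(DMA_Device_t dev, dma_addr_t devFifoPhys,
                            void *buf, size_t len)
    {
            DMA_MemMap_t memMap;
            int rc;

            dma_init_mem_map(&memMap);      /* zero the map, init its semaphore */

            /* dma_map_start + dma_map_add_region; unmaps itself on failure */
            rc = dma_map_mem(&memMap, buf, len, DMA_TO_DEVICE);
            if (rc != 0)
                    goto term;

            /* turn the mapped segments into descriptors for this device */
            rc = dma_map_create_descriptor_ring(dev, &memMap, devFifoPhys);

            /* ... kick off the transfer and wait for completion here ... */

            dma_unmap(&memMap, 0);          /* 0: device did not dirty the pages */
    term:
            dma_term_mem_map(&memMap);
            return rc;
    }

Worth noting while it is still legible above: in the deleted dma_map_add_region, the two early returns in the DMA_MEM_TYPE_USER case (-EINVAL when no user task was set, -ENOMEM when the page array allocation fails) return with memMap->lock still held and numRegionsUsed already incremented, unlike every other error path, which unwinds through the out: label.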