author     Linus Torvalds <torvalds@linux-foundation.org>  2012-02-04 15:11:40 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-02-04 15:11:40 -0500
commit     d9142025f55973149a854c97e860fff61ed05b37 (patch)
tree       01e1b887513792550b0bdd5b73708c657af876ab /arch
parent     31c150a11c867da233a7b5e13b45bcbd3a796bde (diff)
parent     ca43784daa7a400407d851799ac69d3de2b2ab4e (diff)
Merge tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
arm-soc fixes for 3.3-rc

* A series of OMAP regression fixes for merge window fallout
* Two patches for Davinci: one removes some misdefined clocks, the other
  is a regression fix for merge window fallout
* Two patches that make Broadcom bcmring build again (and remove a bunch
  of unused code in the process)

* tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc:
  ARM: bcmring: fix build failure in mach-bcmring/arch.c
  ARM: bcmring: remove unused DMA map code
  ARM: davinci: update mdio bus name
  ARM: OMAP2+: arch/arm/mach-omap2/smartreflex.c: add missing iounmap
  ARM: OMAP2+: arch/arm/mach-omap2/devices.c: introduce missing kfree
  ARM: OMAP: fix MMC2 loopback clock handling
  ARM: OMAP: fix erroneous mmc2 clock change on mmc3 setup
  ARM: OMAP2+: GPMC: fix device size setup
  ARM: OMAP2+: timer: Fix crash due to wrong arg to __omap_dm_timer_read_counter
  ARM: OMAP3: hwmod data: register dss hwmods after dss_core
  ARM: OMAP2/3: PRM: fix missing plat/irqs.h build breakage
  ARM: OMAP2+: io: fix compilation breakage on 2420-only configs
  ARM: OMAP4: hwmod data: Add names for DMIC memory address space
  ARM: OMAP3: hwmod data: add SYSC_HAS_ENAWAKEUP for dispc
  ARM: OMAP2+: hwmod data: split omap2/3 dispc hwmod class
  ARM: davinci: DA850: remove non-existing pll1_sysclk4-7 clocks
  ARM: OMAP2: fix regulator warnings
  ARM: OMAP2: fix omap3 touchbook kconfig warning
  i2c: OMAP: Fix OMAP1 build error
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mach-bcmring/arch.c                              |    2
-rw-r--r--  arch/arm/mach-bcmring/dma.c                               |  812
-rw-r--r--  arch/arm/mach-bcmring/include/mach/dma.h                  |  196
-rw-r--r--  arch/arm/mach-davinci/board-da850-evm.c                   |    2
-rw-r--r--  arch/arm/mach-davinci/board-dm365-evm.c                   |    2
-rw-r--r--  arch/arm/mach-davinci/board-dm644x-evm.c                  |    2
-rw-r--r--  arch/arm/mach-davinci/board-dm646x-evm.c                  |    2
-rw-r--r--  arch/arm/mach-davinci/board-neuros-osd2.c                 |    2
-rw-r--r--  arch/arm/mach-davinci/board-omapl138-hawk.c               |    2
-rw-r--r--  arch/arm/mach-davinci/board-sffsdr.c                      |    2
-rw-r--r--  arch/arm/mach-davinci/da850.c                             |   32
-rw-r--r--  arch/arm/mach-omap2/Kconfig                               |   11
-rw-r--r--  arch/arm/mach-omap2/devices.c                             |    1
-rw-r--r--  arch/arm/mach-omap2/gpmc.c                                |    6
-rw-r--r--  arch/arm/mach-omap2/hsmmc.c                               |   16
-rw-r--r--  arch/arm/mach-omap2/io.c                                  |    4
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c   |   21
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c        |   22
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c                |   54
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c                |    2
-rw-r--r--  arch/arm/mach-omap2/prm2xxx_3xxx.c                        |    1
-rw-r--r--  arch/arm/mach-omap2/smartreflex.c                         |    2
-rw-r--r--  arch/arm/mach-omap2/timer.c                               |    2
23 files changed, 105 insertions(+), 1093 deletions(-)
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index 9e5e7552498c..45c97b1ee9b1 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -194,6 +194,6 @@ MACHINE_START(BCMRING, "BCMRING")
 	.init_early = bcmring_init_early,
 	.init_irq = bcmring_init_irq,
 	.timer = &bcmring_timer,
-	.init_machine = bcmring_init_machine
+	.init_machine = bcmring_init_machine,
 	.restart = bcmring_restart,
 MACHINE_END
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 1a1a27dd5654..1024396797e1 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -33,17 +33,11 @@
 
 #include <mach/timer.h>
 
-#include <linux/mm.h>
 #include <linux/pfn.h>
 #include <linux/atomic.h>
 #include <linux/sched.h>
 #include <mach/dma.h>
 
-/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
-/* especially since dc4 doesn't use kmalloc'd memory. */
-
-#define ALLOW_MAP_OF_KMALLOC_MEMORY 0
-
 /* ---- Public Variables ------------------------------------------------- */
 
 /* ---- Private Constants and Types -------------------------------------- */
@@ -53,24 +47,12 @@
 #define CONTROLLER_FROM_HANDLE(handle)	(((handle) >> 4) & 0x0f)
 #define CHANNEL_FROM_HANDLE(handle)	((handle) & 0x0f)
 
-#define DMA_MAP_DEBUG 0
-
-#if DMA_MAP_DEBUG
-#	define DMA_MAP_PRINT(fmt, args...)	printk("%s: " fmt, __func__, ## args)
-#else
-#	define DMA_MAP_PRINT(fmt, args...)
-#endif
 
 /* ---- Private Variables ------------------------------------------------ */
 
 static DMA_Global_t gDMA;
 static struct proc_dir_entry *gDmaDir;
 
-static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
-
 #include "dma_device.c"
 
 /* ---- Private Function Prototypes -------------------------------------- */
@@ -79,34 +61,6 @@ static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
 
 /****************************************************************************/
 /**
-*   Displays information for /proc/dma/mem-type
-*/
-/****************************************************************************/
-
-static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
-				  int count, int *eof, void *data)
-{
-	int len = 0;
-
-	len += sprintf(buf + len, "dma_map_mem statistics\n");
-	len +=
-	    sprintf(buf + len, "coherent: %d\n",
-		    atomic_read(&gDmaStatMemTypeCoherent));
-	len +=
-	    sprintf(buf + len, "kmalloc: %d\n",
-		    atomic_read(&gDmaStatMemTypeKmalloc));
-	len +=
-	    sprintf(buf + len, "vmalloc: %d\n",
-		    atomic_read(&gDmaStatMemTypeVmalloc));
-	len +=
-	    sprintf(buf + len, "user: %d\n",
-		    atomic_read(&gDmaStatMemTypeUser));
-
-	return len;
-}
-
-/****************************************************************************/
-/**
 *   Displays information for /proc/dma/channels
 */
 /****************************************************************************/
@@ -846,8 +800,6 @@ int dma_init(void)
				       dma_proc_read_channels, NULL);
 		create_proc_read_entry("devices", 0, gDmaDir,
				       dma_proc_read_devices, NULL);
-		create_proc_read_entry("mem-type", 0, gDmaDir,
-				       dma_proc_read_mem_type, NULL);
 	}
 
 out:
@@ -1565,767 +1517,3 @@ int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for.
1565}
1566
1567EXPORT_SYMBOL(dma_set_device_handler);
1568
1569/****************************************************************************/
1570/**
1571* Initializes a memory mapping structure
1572*/
1573/****************************************************************************/
1574
1575int dma_init_mem_map(DMA_MemMap_t *memMap)
1576{
1577 memset(memMap, 0, sizeof(*memMap));
1578
1579 sema_init(&memMap->lock, 1);
1580
1581 return 0;
1582}
1583
1584EXPORT_SYMBOL(dma_init_mem_map);
1585
1586/****************************************************************************/
1587/**
1588* Releases any memory currently being held by a memory mapping structure.
1589*/
1590/****************************************************************************/
1591
1592int dma_term_mem_map(DMA_MemMap_t *memMap)
1593{
1594 down(&memMap->lock); /* Just being paranoid */
1595
1596 /* Free up any allocated memory */
1597
1598 up(&memMap->lock);
1599 memset(memMap, 0, sizeof(*memMap));
1600
1601 return 0;
1602}
1603
1604EXPORT_SYMBOL(dma_term_mem_map);
1605
1606/****************************************************************************/
1607/**
1608* Looks at a memory address and categorizes it.
1609*
1610* @return One of the values from the DMA_MemType_t enumeration.
1611*/
1612/****************************************************************************/
1613
1614DMA_MemType_t dma_mem_type(void *addr)
1615{
1616 unsigned long addrVal = (unsigned long)addr;
1617
1618 if (addrVal >= CONSISTENT_BASE) {
1619 /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
1620
1621 /* dma_alloc_xxx pages are physically and virtually contiguous */
1622
1623 return DMA_MEM_TYPE_DMA;
1624 }
1625
1626 /* Technically, we could add one more classification. Addresses between VMALLOC_END */
1627 /* and the beginning of the DMA virtual address could be considered to be I/O space. */
1628 /* Right now, nobody cares about this particular classification, so we ignore it. */
1629
1630 if (is_vmalloc_addr(addr)) {
1631 /* Address comes from the vmalloc'd region. Pages are virtually */
1632 /* contiguous but NOT physically contiguous */
1633
1634 return DMA_MEM_TYPE_VMALLOC;
1635 }
1636
1637 if (addrVal >= PAGE_OFFSET) {
1638 /* PAGE_OFFSET is typically 0xC0000000 */
1639
1640 /* kmalloc'd pages are physically contiguous */
1641
1642 return DMA_MEM_TYPE_KMALLOC;
1643 }
1644
1645 return DMA_MEM_TYPE_USER;
1646}
1647
1648EXPORT_SYMBOL(dma_mem_type);
1649
1650/****************************************************************************/
1651/**
1652* Looks at a memory address and determines if we support DMA'ing to/from
1653* that type of memory.
1654*
1655* @return boolean -
1656* return value != 0 means dma supported
1657* return value == 0 means dma not supported
1658*/
1659/****************************************************************************/
1660
1661int dma_mem_supports_dma(void *addr)
1662{
1663 DMA_MemType_t memType = dma_mem_type(addr);
1664
1665 return (memType == DMA_MEM_TYPE_DMA)
1666#if ALLOW_MAP_OF_KMALLOC_MEMORY
1667 || (memType == DMA_MEM_TYPE_KMALLOC)
1668#endif
1669 || (memType == DMA_MEM_TYPE_USER);
1670}
1671
1672EXPORT_SYMBOL(dma_mem_supports_dma);
1673
1674/****************************************************************************/
1675/**
1676* Maps in a memory region such that it can be used for performing a DMA.
1677*
1678* @return
1679*/
1680/****************************************************************************/
1681
1682int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
1683 enum dma_data_direction dir /* Direction that the mapping will be going */
1684 ) {
1685 int rc;
1686
1687 down(&memMap->lock);
1688
1689 DMA_MAP_PRINT("memMap: %p\n", memMap);
1690
1691 if (memMap->inUse) {
1692 printk(KERN_ERR "%s: memory map %p is already being used\n",
1693 __func__, memMap);
1694 rc = -EBUSY;
1695 goto out;
1696 }
1697
1698 memMap->inUse = 1;
1699 memMap->dir = dir;
1700 memMap->numRegionsUsed = 0;
1701
1702 rc = 0;
1703
1704out:
1705
1706 DMA_MAP_PRINT("returning %d", rc);
1707
1708 up(&memMap->lock);
1709
1710 return rc;
1711}
1712
1713EXPORT_SYMBOL(dma_map_start);
1714
1715/****************************************************************************/
1716/**
1717* Adds a segment of memory to a memory map. Each segment is both
1718* physically and virtually contiguous.
1719*
1720* @return 0 on success, error code otherwise.
1721*/
1722/****************************************************************************/
1723
1724static int dma_map_add_segment(DMA_MemMap_t *memMap, /* Stores state information about the map */
1725 DMA_Region_t *region, /* Region that the segment belongs to */
1726 void *virtAddr, /* Virtual address of the segment being added */
1727 dma_addr_t physAddr, /* Physical address of the segment being added */
1728 size_t numBytes /* Number of bytes of the segment being added */
1729 ) {
1730 DMA_Segment_t *segment;
1731
1732 DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr,
1733 physAddr, numBytes);
1734
1735 /* Sanity check */
1736
1737 if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
1738 || (((unsigned long)virtAddr + numBytes)) >
1739 ((unsigned long)region->virtAddr + region->numBytes)) {
1740 printk(KERN_ERR
1741 "%s: virtAddr %p is outside region @ %p len: %d\n",
1742 __func__, virtAddr, region->virtAddr, region->numBytes);
1743 return -EINVAL;
1744 }
1745
1746 if (region->numSegmentsUsed > 0) {
1747 /* Check to see if this segment is physically contiguous with the previous one */
1748
1749 segment = &region->segment[region->numSegmentsUsed - 1];
1750
1751 if ((segment->physAddr + segment->numBytes) == physAddr) {
1752 /* It is - just add on to the end */
1753
1754 DMA_MAP_PRINT("appending %d bytes to last segment\n",
1755 numBytes);
1756
1757 segment->numBytes += numBytes;
1758
1759 return 0;
1760 }
1761 }
1762
1763 /* Reallocate to hold more segments, if required. */
1764
1765 if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
1766 DMA_Segment_t *newSegment;
1767 size_t oldSize =
1768 region->numSegmentsAllocated * sizeof(*newSegment);
1769 int newAlloc = region->numSegmentsAllocated + 4;
1770 size_t newSize = newAlloc * sizeof(*newSegment);
1771
1772 newSegment = kmalloc(newSize, GFP_KERNEL);
1773 if (newSegment == NULL) {
1774 return -ENOMEM;
1775 }
1776 memcpy(newSegment, region->segment, oldSize);
1777 memset(&((uint8_t *) newSegment)[oldSize], 0,
1778 newSize - oldSize);
1779 kfree(region->segment);
1780
1781 region->numSegmentsAllocated = newAlloc;
1782 region->segment = newSegment;
1783 }
1784
1785 segment = &region->segment[region->numSegmentsUsed];
1786 region->numSegmentsUsed++;
1787
1788 segment->virtAddr = virtAddr;
1789 segment->physAddr = physAddr;
1790 segment->numBytes = numBytes;
1791
1792 DMA_MAP_PRINT("returning success\n");
1793
1794 return 0;
1795}
1796
1797/****************************************************************************/
1798/**
1799* Adds a region of memory to a memory map. Each region is virtually
1800* contiguous, but not necessarily physically contiguous.
1801*
1802* @return 0 on success, error code otherwise.
1803*/
1804/****************************************************************************/
1805
1806int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
1807 void *mem, /* Virtual address that we want to get a map of */
1808 size_t numBytes /* Number of bytes being mapped */
1809 ) {
1810 unsigned long addr = (unsigned long)mem;
1811 unsigned int offset;
1812 int rc = 0;
1813 DMA_Region_t *region;
1814 dma_addr_t physAddr;
1815
1816 down(&memMap->lock);
1817
1818 DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes);
1819
1820 if (!memMap->inUse) {
1821 printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
1822 __func__);
1823 rc = -EINVAL;
1824 goto out;
1825 }
1826
1827 /* Reallocate to hold more regions. */
1828
1829 if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
1830 DMA_Region_t *newRegion;
1831 size_t oldSize =
1832 memMap->numRegionsAllocated * sizeof(*newRegion);
1833 int newAlloc = memMap->numRegionsAllocated + 4;
1834 size_t newSize = newAlloc * sizeof(*newRegion);
1835
1836 newRegion = kmalloc(newSize, GFP_KERNEL);
1837 if (newRegion == NULL) {
1838 rc = -ENOMEM;
1839 goto out;
1840 }
1841 memcpy(newRegion, memMap->region, oldSize);
1842 memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
1843
1844 kfree(memMap->region);
1845
1846 memMap->numRegionsAllocated = newAlloc;
1847 memMap->region = newRegion;
1848 }
1849
1850 region = &memMap->region[memMap->numRegionsUsed];
1851 memMap->numRegionsUsed++;
1852
1853 offset = addr & ~PAGE_MASK;
1854
1855 region->memType = dma_mem_type(mem);
1856 region->virtAddr = mem;
1857 region->numBytes = numBytes;
1858 region->numSegmentsUsed = 0;
1859 region->numLockedPages = 0;
1860 region->lockedPages = NULL;
1861
1862 switch (region->memType) {
1863 case DMA_MEM_TYPE_VMALLOC:
1864 {
1865 atomic_inc(&gDmaStatMemTypeVmalloc);
1866
1867 /* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
1868
1869 /* vmalloc'd pages are not physically contiguous */
1870
1871 rc = -EINVAL;
1872 break;
1873 }
1874
1875 case DMA_MEM_TYPE_KMALLOC:
1876 {
1877 atomic_inc(&gDmaStatMemTypeKmalloc);
1878
1879 /* kmalloc'd pages are physically contiguous, so they'll have exactly */
1880 /* one segment */
1881
1882#if ALLOW_MAP_OF_KMALLOC_MEMORY
1883 physAddr =
1884 dma_map_single(NULL, mem, numBytes, memMap->dir);
1885 rc = dma_map_add_segment(memMap, region, mem, physAddr,
1886 numBytes);
1887#else
1888 rc = -EINVAL;
1889#endif
1890 break;
1891 }
1892
1893 case DMA_MEM_TYPE_DMA:
1894 {
1895 /* dma_alloc_xxx pages are physically contiguous */
1896
1897 atomic_inc(&gDmaStatMemTypeCoherent);
1898
1899 physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
1900
1901 dma_sync_single_for_cpu(NULL, physAddr, numBytes,
1902 memMap->dir);
1903 rc = dma_map_add_segment(memMap, region, mem, physAddr,
1904 numBytes);
1905 break;
1906 }
1907
1908 case DMA_MEM_TYPE_USER:
1909 {
1910 size_t firstPageOffset;
1911 size_t firstPageSize;
1912 struct page **pages;
1913 struct task_struct *userTask;
1914
1915 atomic_inc(&gDmaStatMemTypeUser);
1916
1917#if 1
1918 /* If the pages are user pages, then the dma_mem_map_set_user_task function */
1919 /* must have been previously called. */
1920
1921 if (memMap->userTask == NULL) {
1922 printk(KERN_ERR
1923 "%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
1924 __func__);
1925 return -EINVAL;
1926 }
1927
1928 /* User pages need to be locked. */
1929
1930 firstPageOffset =
1931 (unsigned long)region->virtAddr & (PAGE_SIZE - 1);
1932 firstPageSize = PAGE_SIZE - firstPageOffset;
1933
1934 region->numLockedPages = (firstPageOffset
1935 + region->numBytes +
1936 PAGE_SIZE - 1) / PAGE_SIZE;
1937 pages =
1938 kmalloc(region->numLockedPages *
1939 sizeof(struct page *), GFP_KERNEL);
1940
1941 if (pages == NULL) {
1942 region->numLockedPages = 0;
1943 return -ENOMEM;
1944 }
1945
1946 userTask = memMap->userTask;
1947
1948 down_read(&userTask->mm->mmap_sem);
1949 rc = get_user_pages(userTask, /* task */
1950 userTask->mm, /* mm */
1951 (unsigned long)region->virtAddr, /* start */
1952 region->numLockedPages, /* len */
1953 memMap->dir == DMA_FROM_DEVICE, /* write */
1954 0, /* force */
1955 pages, /* pages (array of pointers to page) */
1956 NULL); /* vmas */
1957 up_read(&userTask->mm->mmap_sem);
1958
1959 if (rc != region->numLockedPages) {
1960 kfree(pages);
1961 region->numLockedPages = 0;
1962
1963 if (rc >= 0) {
1964 rc = -EINVAL;
1965 }
1966 } else {
1967 uint8_t *virtAddr = region->virtAddr;
1968 size_t bytesRemaining;
1969 int pageIdx;
1970
1971 rc = 0; /* Since get_user_pages returns +ve number */
1972
1973 region->lockedPages = pages;
1974
1975 /* We've locked the user pages. Now we need to walk them and figure */
1976 /* out the physical addresses. */
1977
1978 /* The first page may be partial */
1979
1980 dma_map_add_segment(memMap,
1981 region,
1982 virtAddr,
1983 PFN_PHYS(page_to_pfn
1984 (pages[0])) +
1985 firstPageOffset,
1986 firstPageSize);
1987
1988 virtAddr += firstPageSize;
1989 bytesRemaining =
1990 region->numBytes - firstPageSize;
1991
1992 for (pageIdx = 1;
1993 pageIdx < region->numLockedPages;
1994 pageIdx++) {
1995 size_t bytesThisPage =
1996 (bytesRemaining >
1997 PAGE_SIZE ? PAGE_SIZE :
1998 bytesRemaining);
1999
2000 DMA_MAP_PRINT
2001 ("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n",
2002 pageIdx, pages[pageIdx],
2003 page_to_pfn(pages[pageIdx]),
2004 PFN_PHYS(page_to_pfn
2005 (pages[pageIdx])));
2006
2007 dma_map_add_segment(memMap,
2008 region,
2009 virtAddr,
2010 PFN_PHYS(page_to_pfn
2011 (pages
2012 [pageIdx])),
2013 bytesThisPage);
2014
2015 virtAddr += bytesThisPage;
2016 bytesRemaining -= bytesThisPage;
2017 }
2018 }
2019#else
2020 printk(KERN_ERR
2021 "%s: User mode pages are not yet supported\n",
2022 __func__);
2023
2024 /* user pages are not physically contiguous */
2025
2026 rc = -EINVAL;
2027#endif
2028 break;
2029 }
2030
2031 default:
2032 {
2033 printk(KERN_ERR "%s: Unsupported memory type: %d\n",
2034 __func__, region->memType);
2035
2036 rc = -EINVAL;
2037 break;
2038 }
2039 }
2040
2041 if (rc != 0) {
2042 memMap->numRegionsUsed--;
2043 }
2044
2045out:
2046
2047 DMA_MAP_PRINT("returning %d\n", rc);
2048
2049 up(&memMap->lock);
2050
2051 return rc;
2052}
2053
2054EXPORT_SYMBOL(dma_map_add_segment);
2055
2056/****************************************************************************/
2057/**
2058* Maps in a memory region such that it can be used for performing a DMA.
2059*
2060* @return 0 on success, error code otherwise.
2061*/
2062/****************************************************************************/
2063
2064int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
2065 void *mem, /* Virtual address that we want to get a map of */
2066 size_t numBytes, /* Number of bytes being mapped */
2067 enum dma_data_direction dir /* Direction that the mapping will be going */
2068 ) {
2069 int rc;
2070
2071 rc = dma_map_start(memMap, dir);
2072 if (rc == 0) {
2073 rc = dma_map_add_region(memMap, mem, numBytes);
2074 if (rc < 0) {
2075 /* Since the add fails, this function will fail, and the caller won't */
2076 /* call unmap, so we need to do it here. */
2077
2078 dma_unmap(memMap, 0);
2079 }
2080 }
2081
2082 return rc;
2083}
2084
2085EXPORT_SYMBOL(dma_map_mem);
2086
2087/****************************************************************************/
2088/**
2089* Setup a descriptor ring for a given memory map.
2090*
2091* It is assumed that the descriptor ring has already been initialized, and
2092* this routine will only reallocate a new descriptor ring if the existing
2093* one is too small.
2094*
2095* @return 0 on success, error code otherwise.
2096*/
2097/****************************************************************************/
2098
2099int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */
2100 DMA_MemMap_t *memMap, /* Memory map that will be used */
2101 dma_addr_t devPhysAddr /* Physical address of device */
2102 ) {
2103 int rc;
2104 int numDescriptors;
2105 DMA_DeviceAttribute_t *devAttr;
2106 DMA_Region_t *region;
2107 DMA_Segment_t *segment;
2108 dma_addr_t srcPhysAddr;
2109 dma_addr_t dstPhysAddr;
2110 int regionIdx;
2111 int segmentIdx;
2112
2113 devAttr = &DMA_gDeviceAttribute[dev];
2114
2115 down(&memMap->lock);
2116
2117 /* Figure out how many descriptors we need */
2118
2119 numDescriptors = 0;
2120 for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2121 region = &memMap->region[regionIdx];
2122
2123 for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2124 segmentIdx++) {
2125 segment = &region->segment[segmentIdx];
2126
2127 if (memMap->dir == DMA_TO_DEVICE) {
2128 srcPhysAddr = segment->physAddr;
2129 dstPhysAddr = devPhysAddr;
2130 } else {
2131 srcPhysAddr = devPhysAddr;
2132 dstPhysAddr = segment->physAddr;
2133 }
2134
2135 rc =
2136 dma_calculate_descriptor_count(dev, srcPhysAddr,
2137 dstPhysAddr,
2138 segment->
2139 numBytes);
2140 if (rc < 0) {
2141 printk(KERN_ERR
2142 "%s: dma_calculate_descriptor_count failed: %d\n",
2143 __func__, rc);
2144 goto out;
2145 }
2146 numDescriptors += rc;
2147 }
2148 }
2149
2150 /* Adjust the size of the ring, if it isn't big enough */
2151
2152 if (numDescriptors > devAttr->ring.descriptorsAllocated) {
2153 dma_free_descriptor_ring(&devAttr->ring);
2154 rc =
2155 dma_alloc_descriptor_ring(&devAttr->ring,
2156 numDescriptors);
2157 if (rc < 0) {
2158 printk(KERN_ERR
2159 "%s: dma_alloc_descriptor_ring failed: %d\n",
2160 __func__, rc);
2161 goto out;
2162 }
2163 } else {
2164 rc =
2165 dma_init_descriptor_ring(&devAttr->ring,
2166 numDescriptors);
2167 if (rc < 0) {
2168 printk(KERN_ERR
2169 "%s: dma_init_descriptor_ring failed: %d\n",
2170 __func__, rc);
2171 goto out;
2172 }
2173 }
2174
2175 /* Populate the descriptors */
2176
2177 for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2178 region = &memMap->region[regionIdx];
2179
2180 for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2181 segmentIdx++) {
2182 segment = &region->segment[segmentIdx];
2183
2184 if (memMap->dir == DMA_TO_DEVICE) {
2185 srcPhysAddr = segment->physAddr;
2186 dstPhysAddr = devPhysAddr;
2187 } else {
2188 srcPhysAddr = devPhysAddr;
2189 dstPhysAddr = segment->physAddr;
2190 }
2191
2192 rc =
2193 dma_add_descriptors(&devAttr->ring, dev,
2194 srcPhysAddr, dstPhysAddr,
2195 segment->numBytes);
2196 if (rc < 0) {
2197 printk(KERN_ERR
2198 "%s: dma_add_descriptors failed: %d\n",
2199 __func__, rc);
2200 goto out;
2201 }
2202 }
2203 }
2204
2205 rc = 0;
2206
2207out:
2208
2209 up(&memMap->lock);
2210 return rc;
2211}
2212
2213EXPORT_SYMBOL(dma_map_create_descriptor_ring);
2214
2215/****************************************************************************/
2216/**
2217* Maps in a memory region such that it can be used for performing a DMA.
2218*
2219* @return
2220*/
2221/****************************************************************************/
2222
2223int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2224 int dirtied /* non-zero if any of the pages were modified */
2225 ) {
2226
2227 int rc = 0;
2228 int regionIdx;
2229 int segmentIdx;
2230 DMA_Region_t *region;
2231 DMA_Segment_t *segment;
2232
2233 down(&memMap->lock);
2234
2235 for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2236 region = &memMap->region[regionIdx];
2237
2238 for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2239 segmentIdx++) {
2240 segment = &region->segment[segmentIdx];
2241
2242 switch (region->memType) {
2243 case DMA_MEM_TYPE_VMALLOC:
2244 {
2245 printk(KERN_ERR
2246 "%s: vmalloc'd pages are not yet supported\n",
2247 __func__);
2248 rc = -EINVAL;
2249 goto out;
2250 }
2251
2252 case DMA_MEM_TYPE_KMALLOC:
2253 {
2254#if ALLOW_MAP_OF_KMALLOC_MEMORY
2255 dma_unmap_single(NULL,
2256 segment->physAddr,
2257 segment->numBytes,
2258 memMap->dir);
2259#endif
2260 break;
2261 }
2262
2263 case DMA_MEM_TYPE_DMA:
2264 {
2265 dma_sync_single_for_cpu(NULL,
2266 segment->
2267 physAddr,
2268 segment->
2269 numBytes,
2270 memMap->dir);
2271 break;
2272 }
2273
2274 case DMA_MEM_TYPE_USER:
2275 {
2276 /* Nothing to do here. */
2277
2278 break;
2279 }
2280
2281 default:
2282 {
2283 printk(KERN_ERR
2284 "%s: Unsupported memory type: %d\n",
2285 __func__, region->memType);
2286 rc = -EINVAL;
2287 goto out;
2288 }
2289 }
2290
2291 segment->virtAddr = NULL;
2292 segment->physAddr = 0;
2293 segment->numBytes = 0;
2294 }
2295
2296 if (region->numLockedPages > 0) {
2297 int pageIdx;
2298
2299 /* Some user pages were locked. We need to go and unlock them now. */
2300
2301 for (pageIdx = 0; pageIdx < region->numLockedPages;
2302 pageIdx++) {
2303 struct page *page =
2304 region->lockedPages[pageIdx];
2305
2306 if (memMap->dir == DMA_FROM_DEVICE) {
2307 SetPageDirty(page);
2308 }
2309 page_cache_release(page);
2310 }
2311 kfree(region->lockedPages);
2312 region->numLockedPages = 0;
2313 region->lockedPages = NULL;
2314 }
2315
2316 region->memType = DMA_MEM_TYPE_NONE;
2317 region->virtAddr = NULL;
2318 region->numBytes = 0;
2319 region->numSegmentsUsed = 0;
2320 }
2321 memMap->userTask = NULL;
2322 memMap->numRegionsUsed = 0;
2323 memMap->inUse = 0;
2324
2325out:
2326 up(&memMap->lock);
2327
2328 return rc;
2329}
2330
2331EXPORT_SYMBOL(dma_unmap);
diff --git a/arch/arm/mach-bcmring/include/mach/dma.h b/arch/arm/mach-bcmring/include/mach/dma.h
index 1f2c5319c056..72543781207b 100644
--- a/arch/arm/mach-bcmring/include/mach/dma.h
+++ b/arch/arm/mach-bcmring/include/mach/dma.h
@@ -26,15 +26,9 @@
 /* ---- Include Files ---------------------------------------------------- */
 
 #include <linux/kernel.h>
-#include <linux/wait.h>
 #include <linux/semaphore.h>
 #include <csp/dmacHw.h>
 #include <mach/timer.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
 
 /* ---- Constants and Types ---------------------------------------------- */
 
@@ -113,78 +107,6 @@ typedef struct {
113
114/****************************************************************************
115*
116* The DMA_MemType_t and DMA_MemMap_t are helper structures used to setup
117* DMA chains from a variety of memory sources.
118*
119*****************************************************************************/
120
121#define DMA_MEM_MAP_MIN_SIZE 4096 /* Pages less than this size are better */
122 /* off not being DMA'd. */
123
124typedef enum {
125 DMA_MEM_TYPE_NONE, /* Not a valid setting */
126 DMA_MEM_TYPE_VMALLOC, /* Memory came from vmalloc call */
127 DMA_MEM_TYPE_KMALLOC, /* Memory came from kmalloc call */
128 DMA_MEM_TYPE_DMA, /* Memory came from dma_alloc_xxx call */
129 DMA_MEM_TYPE_USER, /* Memory came from user space. */
130
131} DMA_MemType_t;
132
133/* A segment represents a physically and virtually contiguous chunk of memory. */
134/* i.e. each segment can be DMA'd */
135/* A user of the DMA code will add memory regions. Each region may need to be */
136/* represented by one or more segments. */
137
138typedef struct {
139 void *virtAddr; /* Virtual address used for this segment */
140 dma_addr_t physAddr; /* Physical address this segment maps to */
141 size_t numBytes; /* Size of the segment, in bytes */
142
143} DMA_Segment_t;
144
145/* A region represents a virtually contiguous chunk of memory, which may be */
146/* made up of multiple segments. */
147
148typedef struct {
149 DMA_MemType_t memType;
150 void *virtAddr;
151 size_t numBytes;
152
153 /* Each region (virtually contiguous) consists of one or more segments. Each */
154 /* segment is virtually and physically contiguous. */
155
156 int numSegmentsUsed;
157 int numSegmentsAllocated;
158 DMA_Segment_t *segment;
159
160 /* When a region corresponds to user memory, we need to lock all of the pages */
161 /* down before we can figure out the physical addresses. The lockedPage array contains */
162 /* the pages that were locked, and which subsequently need to be unlocked once the */
163 /* memory is unmapped. */
164
165 unsigned numLockedPages;
166 struct page **lockedPages;
167
168} DMA_Region_t;
169
170typedef struct {
171 int inUse; /* Is this mapping currently being used? */
172 struct semaphore lock; /* Acquired when using this structure */
173 enum dma_data_direction dir; /* Direction this transfer is intended for */
174
175 /* In the event that we're mapping user memory, we need to know which task */
176 /* the memory is for, so that we can obtain the correct mm locks. */
177
178 struct task_struct *userTask;
179
180 int numRegionsUsed;
181 int numRegionsAllocated;
182 DMA_Region_t *region;
183
184} DMA_MemMap_t;
185
186/****************************************************************************
187*
188* The DMA_DeviceAttribute_t contains information which describes a
189* particular DMA device (or peripheral).
190*
@@ -570,124 +492,6 @@ int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */
570
571/****************************************************************************/
572/**
573* Initializes a DMA_MemMap_t data structure
574*/
575/****************************************************************************/
576
577int dma_init_mem_map(DMA_MemMap_t *memMap /* Stores state information about the map */
578 );
579
580/****************************************************************************/
581/**
582* Releases any memory currently being held by a memory mapping structure.
583*/
584/****************************************************************************/
585
586int dma_term_mem_map(DMA_MemMap_t *memMap /* Stores state information about the map */
587 );
588
589/****************************************************************************/
590/**
591* Looks at a memory address and categorizes it.
592*
593* @return One of the values from the DMA_MemType_t enumeration.
594*/
595/****************************************************************************/
596
597DMA_MemType_t dma_mem_type(void *addr);
598
599/****************************************************************************/
600/**
601* Sets the process (aka userTask) associated with a mem map. This is
602* required if user-mode segments will be added to the mapping.
603*/
604/****************************************************************************/
605
606static inline void dma_mem_map_set_user_task(DMA_MemMap_t *memMap,
607 struct task_struct *task)
608{
609 memMap->userTask = task;
610}
611
612/****************************************************************************/
613/**
614* Looks at a memory address and determines if we support DMA'ing to/from
615* that type of memory.
616*
617* @return boolean -
618* return value != 0 means dma supported
619* return value == 0 means dma not supported
620*/
621/****************************************************************************/
622
623int dma_mem_supports_dma(void *addr);
624
625/****************************************************************************/
626/**
627* Initializes a memory map for use. Since this function acquires a
628* sempaphore within the memory map, it is VERY important that dma_unmap
629* be called when you're finished using the map.
630*/
631/****************************************************************************/
632
633int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
634 enum dma_data_direction dir /* Direction that the mapping will be going */
635 );
636
637/****************************************************************************/
638/**
639* Adds a segment of memory to a memory map.
640*
641* @return 0 on success, error code otherwise.
642*/
643/****************************************************************************/
644
645int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
646 void *mem, /* Virtual address that we want to get a map of */
647 size_t numBytes /* Number of bytes being mapped */
648 );
649
650/****************************************************************************/
651/**
652* Creates a descriptor ring from a memory mapping.
653*
654* @return 0 on success, error code otherwise.
655*/
656/****************************************************************************/
657
658int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */
659 DMA_MemMap_t *memMap, /* Memory map that will be used */
660 dma_addr_t devPhysAddr /* Physical address of device */
661 );
662
663/****************************************************************************/
664/**
665* Maps in a memory region such that it can be used for performing a DMA.
666*
667* @return
668*/
669/****************************************************************************/
670
671int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
672 void *addr, /* Virtual address that we want to get a map of */
673 size_t count, /* Number of bytes being mapped */
674 enum dma_data_direction dir /* Direction that the mapping will be going */
675 );
676
677/****************************************************************************/
678/**
679* Maps in a memory region such that it can be used for performing a DMA.
680*
681* @return
682*/
683/****************************************************************************/
684
685int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
686 int dirtied /* non-zero if any of the pages were modified */
687 );
688
689/****************************************************************************/
690/**
691* Initiates a transfer when the descriptors have already been setup.
692*
693* This is a special case, and normally, the dma_transfer_xxx functions should
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 6b22b543a83f..d5088900af6c 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -44,7 +44,7 @@
 #include <mach/aemif.h>
 #include <mach/spi.h>
 
-#define DA850_EVM_PHY_ID		"0:00"
+#define DA850_EVM_PHY_ID		"davinci_mdio-0:00"
 #define DA850_LCD_PWR_PIN		GPIO_TO_PIN(2, 8)
 #define DA850_LCD_BL_PIN		GPIO_TO_PIN(2, 15)
 
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 346e1de2f5a8..849311d3cb7c 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -54,7 +54,7 @@ static inline int have_tvp7002(void)
 	return 0;
 }
 
-#define DM365_EVM_PHY_ID		"0:01"
+#define DM365_EVM_PHY_ID		"davinci_mdio-0:01"
 /*
  * A MAX-II CPLD is used for various board control functions.
  */
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a64b49cfedca..1247ecdcf752 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -40,7 +40,7 @@
 #include <mach/usb.h>
 #include <mach/aemif.h>
 
-#define DM644X_EVM_PHY_ID		"0:01"
+#define DM644X_EVM_PHY_ID		"davinci_mdio-0:01"
 #define LXT971_PHY_ID			(0x001378e2)
 #define LXT971_PHY_MASK			(0xfffffff0)
 
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 64017558860b..872ac69fa049 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -736,7 +736,7 @@ static struct davinci_uart_config uart_config __initdata = {
 	.enabled_uarts = (1 << 0),
 };
 
-#define DM646X_EVM_PHY_ID		"0:01"
+#define DM646X_EVM_PHY_ID		"davinci_mdio-0:01"
 /*
  * The following EDMA channels/slots are not being used by drivers (for
  * example: Timer, GPIO, UART events etc) on dm646x, hence they are being
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 6c4a16415d47..8d34f513d415 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -39,7 +39,7 @@
 #include <mach/mmc.h>
 #include <mach/usb.h>
 
-#define NEUROS_OSD2_PHY_ID		"0:01"
+#define NEUROS_OSD2_PHY_ID		"davinci_mdio-0:01"
 #define LXT971_PHY_ID			0x001378e2
 #define LXT971_PHY_MASK			0xfffffff0
 
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index e7c0c7c53493..45e815760a27 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -21,7 +21,7 @@
 #include <mach/da8xx.h>
 #include <mach/mux.h>
 
-#define HAWKBOARD_PHY_ID		"0:07"
+#define HAWKBOARD_PHY_ID		"davinci_mdio-0:07"
 #define DA850_HAWK_MMCSD_CD_PIN		GPIO_TO_PIN(3, 12)
 #define DA850_HAWK_MMCSD_WP_PIN		GPIO_TO_PIN(3, 13)
 
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 0b136a831c59..31da3c5b2ba3 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -42,7 +42,7 @@
 #include <mach/mux.h>
 #include <mach/usb.h>
 
-#define SFFSDR_PHY_ID		"0:01"
+#define SFFSDR_PHY_ID		"davinci_mdio-0:01"
 static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
 	/* U-Boot Environment: Block 0
 	 * UBL:                Block 1
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 0ed7fdb64efb..992c4c410185 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -153,34 +153,6 @@ static struct clk pll1_sysclk3 = {
 	.div_reg	= PLLDIV3,
 };
 
-static struct clk pll1_sysclk4 = {
-	.name		= "pll1_sysclk4",
-	.parent		= &pll1_clk,
-	.flags		= CLK_PLL,
-	.div_reg	= PLLDIV4,
-};
-
-static struct clk pll1_sysclk5 = {
-	.name		= "pll1_sysclk5",
-	.parent		= &pll1_clk,
-	.flags		= CLK_PLL,
-	.div_reg	= PLLDIV5,
-};
-
-static struct clk pll1_sysclk6 = {
-	.name		= "pll0_sysclk6",
-	.parent		= &pll0_clk,
-	.flags		= CLK_PLL,
-	.div_reg	= PLLDIV6,
-};
-
-static struct clk pll1_sysclk7 = {
-	.name		= "pll1_sysclk7",
-	.parent		= &pll1_clk,
-	.flags		= CLK_PLL,
-	.div_reg	= PLLDIV7,
-};
-
 static struct clk i2c0_clk = {
 	.name		= "i2c0",
 	.parent		= &pll0_aux_clk,
@@ -397,10 +369,6 @@ static struct clk_lookup da850_clks[] = {
 	CLK(NULL,		"pll1_aux",	&pll1_aux_clk),
 	CLK(NULL,		"pll1_sysclk2",	&pll1_sysclk2),
 	CLK(NULL,		"pll1_sysclk3",	&pll1_sysclk3),
-	CLK(NULL,		"pll1_sysclk4",	&pll1_sysclk4),
-	CLK(NULL,		"pll1_sysclk5",	&pll1_sysclk5),
-	CLK(NULL,		"pll1_sysclk6",	&pll1_sysclk6),
-	CLK(NULL,		"pll1_sysclk7",	&pll1_sysclk7),
 	CLK("i2c_davinci.1",	NULL,		&i2c0_clk),
 	CLK(NULL,		"timer0",	&timerp64_0_clk),
 	CLK("watchdog",		NULL,		&timerp64_1_clk),
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 41e6612ecbaf..d965da45160e 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -213,13 +213,12 @@ config MACH_OMAP3_PANDORA
 	depends on ARCH_OMAP3
 	default y
 	select OMAP_PACKAGE_CBB
-	select REGULATOR_FIXED_VOLTAGE
+	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_OMAP3_TOUCHBOOK
 	bool "OMAP3 Touch Book"
 	depends on ARCH_OMAP3
 	default y
-	select BACKLIGHT_CLASS_DEVICE
 
 config MACH_OMAP_3430SDP
 	bool "OMAP 3430 SDP board"
@@ -265,7 +264,7 @@ config MACH_OMAP_ZOOM2
 	select SERIAL_8250
 	select SERIAL_CORE_CONSOLE
 	select SERIAL_8250_CONSOLE
-	select REGULATOR_FIXED_VOLTAGE
+	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_OMAP_ZOOM3
 	bool "OMAP3630 Zoom3 board"
@@ -275,7 +274,7 @@ config MACH_OMAP_ZOOM3
 	select SERIAL_8250
 	select SERIAL_CORE_CONSOLE
 	select SERIAL_8250_CONSOLE
-	select REGULATOR_FIXED_VOLTAGE
+	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_CM_T35
 	bool "CompuLab CM-T35/CM-T3730 modules"
@@ -334,7 +333,7 @@ config MACH_OMAP_4430SDP
 	depends on ARCH_OMAP4
 	select OMAP_PACKAGE_CBL
 	select OMAP_PACKAGE_CBS
-	select REGULATOR_FIXED_VOLTAGE
+	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_OMAP4_PANDA
 	bool "OMAP4 Panda Board"
@@ -342,7 +341,7 @@ config MACH_OMAP4_PANDA
 	depends on ARCH_OMAP4
 	select OMAP_PACKAGE_CBL
 	select OMAP_PACKAGE_CBS
-	select REGULATOR_FIXED_VOLTAGE
+	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config OMAP3_EMU
 	bool "OMAP3 debugging peripherals"
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 0b510ad01a00..283d11eae693 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -405,6 +405,7 @@ static int omap_mcspi_init(struct omap_hwmod *oh, void *unused)
 		break;
 	default:
 		pr_err("Invalid McSPI Revision value\n");
+		kfree(pdata);
 		return -EINVAL;
 	}
 
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 130034bf01d5..dfffbbf4c009 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -528,7 +528,13 @@ int gpmc_cs_configure(int cs, int cmd, int wval)
 
 	case GPMC_CONFIG_DEV_SIZE:
 		regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+
+		/* clear 2 target bits */
+		regval &= ~GPMC_CONFIG1_DEVICESIZE(3);
+
+		/* set the proper value */
 		regval |= GPMC_CONFIG1_DEVICESIZE(wval);
+
 		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
 		break;
 
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index bd844af13af5..ad0adb5a1e0e 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -175,14 +175,15 @@ static void hsmmc2_select_input_clk_src(struct omap_mmc_platform_data *mmc)
 {
 	u32 reg;
 
-	if (mmc->slots[0].internal_clock) {
-		reg = omap_ctrl_readl(control_devconf1_offset);
+	reg = omap_ctrl_readl(control_devconf1_offset);
+	if (mmc->slots[0].internal_clock)
 		reg |= OMAP2_MMCSDIO2ADPCLKISEL;
-		omap_ctrl_writel(reg, control_devconf1_offset);
-	}
+	else
+		reg &= ~OMAP2_MMCSDIO2ADPCLKISEL;
+	omap_ctrl_writel(reg, control_devconf1_offset);
 }
 
-static void hsmmc23_before_set_reg(struct device *dev, int slot,
+static void hsmmc2_before_set_reg(struct device *dev, int slot,
 				   int power_on, int vdd)
 {
 	struct omap_mmc_platform_data *mmc = dev->platform_data;
@@ -407,14 +408,13 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
 			c->caps &= ~MMC_CAP_8_BIT_DATA;
 			c->caps |= MMC_CAP_4_BIT_DATA;
 		}
-		/* FALLTHROUGH */
-	case 3:
 		if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
 			/* off-chip level shifting, or none */
-			mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
+			mmc->slots[0].before_set_reg = hsmmc2_before_set_reg;
 			mmc->slots[0].after_set_reg = NULL;
 		}
 		break;
+	case 3:
 	case 4:
 	case 5:
 		mmc->slots[0].before_set_reg = NULL;
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 3f174d51f67f..eb50c29fb644 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -388,7 +388,7 @@ static void __init omap_hwmod_init_postsetup(void)
 	omap_pm_if_early_init();
 }
 
-#ifdef CONFIG_ARCH_OMAP2
+#ifdef CONFIG_SOC_OMAP2420
 void __init omap2420_init_early(void)
 {
 	omap2_set_globals_242x();
@@ -400,7 +400,9 @@ void __init omap2420_init_early(void)
 	omap_hwmod_init_postsetup();
 	omap2420_clk_init();
 }
+#endif
 
+#ifdef CONFIG_SOC_OMAP2430
 void __init omap2430_init_early(void)
 {
 	omap2_set_globals_243x();
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
index c11273da5dcc..f08e442af397 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
@@ -56,27 +56,6 @@ struct omap_hwmod_class omap2_dss_hwmod_class = {
 };
 
 /*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-struct omap_hwmod_class omap2_dispc_hwmod_class = {
-	.name	= "dispc",
-	.sysc	= &omap2_dispc_sysc,
-};
-
-/*
  * 'rfbi' class
  * remote frame buffer interface
  */
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 177dee20faef..2a6729741b06 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -28,6 +28,28 @@ struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
 	{ .name = "dispc", .dma_req = 5 },
 	{ .dma_req = -1 }
 };
+
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_dispc_hwmod_class = {
+	.name	= "dispc",
+	.sysc	= &omap2_dispc_sysc,
+};
+
 /* OMAP2xxx Timer Common */
 static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
 	.rev_offs	= 0x0000,
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 5324e8d93bc0..3c8dd928628e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1480,6 +1480,28 @@ static struct omap_hwmod omap3xxx_dss_core_hwmod = {
 	.masters_cnt	= ARRAY_SIZE(omap3xxx_dss_masters),
 };
 
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap3_dispc_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+			   SYSC_HAS_ENAWAKEUP),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3_dispc_hwmod_class = {
+	.name	= "dispc",
+	.sysc	= &omap3_dispc_sysc,
+};
+
 /* l4_core -> dss_dispc */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
 	.master		= &omap3xxx_l4_core_hwmod,
@@ -1503,7 +1525,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
 
 static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
 	.name		= "dss_dispc",
-	.class		= &omap2_dispc_hwmod_class,
+	.class		= &omap3_dispc_hwmod_class,
 	.mpu_irqs	= omap2_dispc_irqs,
 	.main_clk	= "dss1_alwon_fck",
 	.prcm		= {
@@ -3523,12 +3545,6 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
 	&omap3xxx_uart2_hwmod,
 	&omap3xxx_uart3_hwmod,
 
-	/* dss class */
-	&omap3xxx_dss_dispc_hwmod,
-	&omap3xxx_dss_dsi1_hwmod,
-	&omap3xxx_dss_rfbi_hwmod,
-	&omap3xxx_dss_venc_hwmod,
-
 	/* i2c class */
 	&omap3xxx_i2c1_hwmod,
 	&omap3xxx_i2c2_hwmod,
@@ -3635,6 +3651,15 @@ static __initdata struct omap_hwmod *am35xx_hwmods[] = {
 	NULL
 };
 
+static __initdata struct omap_hwmod *omap3xxx_dss_hwmods[] = {
+	/* dss class */
+	&omap3xxx_dss_dispc_hwmod,
+	&omap3xxx_dss_dsi1_hwmod,
+	&omap3xxx_dss_rfbi_hwmod,
+	&omap3xxx_dss_venc_hwmod,
+	NULL
+};
+
 int __init omap3xxx_hwmod_init(void)
 {
 	int r;
@@ -3708,6 +3733,21 @@ int __init omap3xxx_hwmod_init(void)
 
 	if (h)
 		r = omap_hwmod_register(h);
+	if (r < 0)
+		return r;
+
+	/*
+	 * DSS code presumes that dss_core hwmod is handled first,
+	 * _before_ any other DSS related hwmods so register common
+	 * DSS hwmods last to ensure that dss_core is already registered.
+	 * Otherwise some change things may happen, for ex. if dispc
+	 * is handled before dss_core and DSS is enabled in bootloader
+	 * DIPSC will be reset with outputs enabled which sometimes leads
+	 * to unrecoverable L3 error.
+	 * XXX The long-term fix to this is to ensure modules are set up
+	 * in dependency order in the hwmod core code.
+	 */
+	r = omap_hwmod_register(omap3xxx_dss_hwmods);
 
 	return r;
 }
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index f9f151081760..ef0524c10a84 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -1031,6 +1031,7 @@ static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = {
 
 static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
 	{
+		.name		= "mpu",
 		.pa_start	= 0x4012e000,
 		.pa_end		= 0x4012e07f,
 		.flags		= ADDR_TYPE_RT
@@ -1049,6 +1050,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = {
 
 static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = {
 	{
+		.name		= "dma",
 		.pa_start	= 0x4902e000,
 		.pa_end		= 0x4902e07f,
 		.flags		= ADDR_TYPE_RT
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
index c1c4d86a79a8..9ce765407ad5 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -19,6 +19,7 @@
 #include "common.h"
 #include <plat/cpu.h>
 #include <plat/prcm.h>
+#include <plat/irqs.h>
 
 #include "vp.h"
 
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 9dd93453e563..7e755bb0ffc4 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -897,7 +897,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 		ret = sr_late_init(sr_info);
 		if (ret) {
 			pr_warning("%s: Error in SR late init\n", __func__);
-			return ret;
+			goto err_iounmap;
 		}
 	}
 
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 6eeff0e0ae01..5c9acea95761 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -270,7 +270,7 @@ static struct clocksource clocksource_gpt = {
 static u32 notrace dmtimer_read_sched_clock(void)
 {
 	if (clksrc.reserved)
-		return __omap_dm_timer_read_counter(clksrc.io_base, 1);
+		return __omap_dm_timer_read_counter(&clksrc, 1);
 
 	return 0;
 }