Diffstat (limited to 'mm/vmalloc.c')

 mm/vmalloc.c | 459 ++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 244 insertions(+), 215 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 871e41c55e23..e86ba6e74b50 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -498,7 +498,11 @@ nocache:
 	}
 
 found:
-	if (addr + size > vend)
+	/*
+	 * Also check the calculated address against vstart: a big
+	 * align request can make ALIGN() wrap the address to 0.
+	 */
+	if (addr + size > vend || addr < vstart)
 		goto overflow;
 
 	va->va_start = addr;
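
Note: with a sufficiently big align request, the ALIGN() arithmetic in
alloc_vmap_area() wraps past zero, so checking the upper bound alone is not
enough. A minimal illustration of the failure mode, using x86-64 values
(not part of the patch):

	unsigned long vstart = 0xffffc90000000000UL;	/* VMALLOC_START */
	unsigned long align  = 1UL << 63;		/* huge alignment */
	unsigned long addr   = ALIGN(vstart, align);	/* wraps to 0 */

Since 0 + size is far below vend, the old check passed and the allocator
handed out va_start == 0, hitting a BUG further down; the new
"addr < vstart" test rejects the wrapped address.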
@@ -840,7 +844,7 @@ static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
  * @order: how many 2^order pages should be occupied in newly allocated block
  * @gfp_mask: flags for the page level allocator
  *
- * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
+ * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
  */
 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 {
@@ -1187,6 +1191,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 EXPORT_SYMBOL(vm_map_ram);
 
 static struct vm_struct *vmlist __initdata;
+
 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add
@@ -1421,13 +1426,15 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 }
 
 /**
- *	get_vm_area - reserve a contiguous kernel virtual area
- *	@size: size of the area
- *	@flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
+ * get_vm_area - reserve a contiguous kernel virtual area
+ * @size: size of the area
+ * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
  *
- *	Search an area of @size in the kernel virtual mapping area,
- *	and reserved it for out purposes. Returns the area descriptor
- *	on success or %NULL on failure.
+ * Search an area of @size in the kernel virtual mapping area,
+ * and reserve it for our purposes. Returns the area descriptor
+ * on success or %NULL on failure.
+ *
+ * Return: the area descriptor on success or %NULL on failure.
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
@@ -1444,12 +1451,14 @@ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 }
 
 /**
- *	find_vm_area - find a continuous kernel virtual area
- *	@addr: base address
+ * find_vm_area - find a continuous kernel virtual area
+ * @addr: base address
  *
- *	Search for the kernel VM area starting at @addr, and return it.
- *	It is up to the caller to do all required locking to keep the returned
- *	pointer valid.
+ * Search for the kernel VM area starting at @addr, and return it.
+ * It is up to the caller to do all required locking to keep the returned
+ * pointer valid.
+ *
+ * Return: pointer to the found area or %NULL on failure
  */
 struct vm_struct *find_vm_area(const void *addr)
 {
@@ -1463,12 +1472,14 @@ struct vm_struct *find_vm_area(const void *addr)
 }
 
 /**
- *	remove_vm_area - find and remove a continuous kernel virtual area
- *	@addr: base address
+ * remove_vm_area - find and remove a continuous kernel virtual area
+ * @addr: base address
  *
- *	Search for the kernel VM area starting at @addr, and remove it.
- *	This function returns the found VM area, but using it is NOT safe
- *	on SMP machines, except for its size or flags.
+ * Search for the kernel VM area starting at @addr, and remove it.
+ * This function returns the found VM area, but using it is NOT safe
+ * on SMP machines, except for its size or flags.
+ *
+ * Return: pointer to the found area or %NULL on failure
  */
 struct vm_struct *remove_vm_area(const void *addr)
 {
@@ -1505,7 +1516,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 			addr))
 		return;
 
-	area = find_vmap_area((unsigned long)addr)->vm;
+	area = find_vm_area(addr);
 	if (unlikely(!area)) {
 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
 			addr);
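
Note: the old code dereferenced the result of find_vmap_area() before the
NULL check could run, so passing a bogus address would oops instead of
reaching the WARN. find_vm_area() does the same lookup but fails
gracefully; a sketch of its body in this era of the kernel, quoted from
memory and for reference only:

	struct vm_struct *find_vm_area(const void *addr)
	{
		struct vmap_area *va;

		va = find_vmap_area((unsigned long)addr);
		if (va && va->flags & VM_VM_AREA)
			return va->vm;

		return NULL;
	}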
@@ -1548,11 +1559,11 @@ static inline void __vfree_deferred(const void *addr)
 }
 
 /**
- *	vfree_atomic - release memory allocated by vmalloc()
- *	@addr: memory base address
+ * vfree_atomic - release memory allocated by vmalloc()
+ * @addr: memory base address
  *
- *	This one is just like vfree() but can be called in any atomic context
- *	except NMIs.
+ * This one is just like vfree() but can be called in any atomic context
+ * except NMIs.
  */
 void vfree_atomic(const void *addr)
 {
@@ -1565,21 +1576,29 @@ void vfree_atomic(const void *addr)
 	__vfree_deferred(addr);
 }
 
+static void __vfree(const void *addr)
+{
+	if (unlikely(in_interrupt()))
+		__vfree_deferred(addr);
+	else
+		__vunmap(addr, 1);
+}
+
 /**
- *	vfree - release memory allocated by vmalloc()
- *	@addr: memory base address
+ * vfree - release memory allocated by vmalloc()
+ * @addr: memory base address
  *
- *	Free the virtually continuous memory area starting at @addr, as
- *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
- *	NULL, no operation is performed.
+ * Free the virtually contiguous memory area starting at @addr, as
+ * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
+ * NULL, no operation is performed.
  *
- *	Must not be called in NMI context (strictly speaking, only if we don't
- *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
- *	conventions for vfree() arch-depenedent would be a really bad idea)
+ * Must not be called in NMI context (strictly speaking, only if we don't
+ * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
+ * conventions for vfree() arch-dependent would be a really bad idea)
  *
- *	May sleep if called *not* from interrupt context.
+ * May sleep if called *not* from interrupt context.
  *
- *	NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
+ * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
  */
 void vfree(const void *addr)
 {
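
Note: __vfree() factors the in_interrupt() dispatch out of vfree() so the
allocation failure path further down can free a half-constructed area
directly. vfree() additionally starts with kmemleak_free(), which must not
run on memory that was never accounted to kmemleak, which is exactly the
situation on that failure path. A sketch of the resulting vfree() body,
assuming the v5.x-era kmemleak hook placement:

	void vfree(const void *addr)
	{
		BUG_ON(in_nmi());

		kmemleak_free(addr);	/* skipped by callers of raw __vfree() */

		if (!addr)
			return;

		__vfree(addr);
	}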
@@ -1591,21 +1610,19 @@ void vfree(const void *addr)
 
 	if (!addr)
 		return;
-	if (unlikely(in_interrupt()))
-		__vfree_deferred(addr);
-	else
-		__vunmap(addr, 1);
+
+	__vfree(addr);
 }
 EXPORT_SYMBOL(vfree);
 
 /**
- *	vunmap - release virtual mapping obtained by vmap()
- *	@addr: memory base address
+ * vunmap - release virtual mapping obtained by vmap()
+ * @addr: memory base address
  *
- *	Free the virtually contiguous memory area starting at @addr,
- *	which was created from the page array passed to vmap().
+ * Free the virtually contiguous memory area starting at @addr,
+ * which was created from the page array passed to vmap().
  *
- *	Must not be called in interrupt context.
+ * Must not be called in interrupt context.
  */
 void vunmap(const void *addr)
 {
@@ -1617,17 +1634,19 @@ void vunmap(const void *addr)
 EXPORT_SYMBOL(vunmap);
 
 /**
- *	vmap - map an array of pages into virtually contiguous space
- *	@pages: array of page pointers
- *	@count: number of pages to map
- *	@flags: vm_area->flags
- *	@prot: page protection for the mapping
+ * vmap - map an array of pages into virtually contiguous space
+ * @pages: array of page pointers
+ * @count: number of pages to map
+ * @flags: vm_area->flags
+ * @prot: page protection for the mapping
  *
- *	Maps @count pages from @pages into contiguous kernel virtual
- *	space.
+ * Maps @count pages from @pages into contiguous kernel virtual
+ * space.
+ *
+ * Return: the address of the area or %NULL on failure
  */
 void *vmap(struct page **pages, unsigned int count,
 	   unsigned long flags, pgprot_t prot)
 {
 	struct vm_struct *area;
 	unsigned long size;		/* In bytes */
@@ -1709,25 +1728,27 @@ fail:
 	warn_alloc(gfp_mask, NULL,
 		   "vmalloc: allocation failure, allocated %ld of %ld bytes",
 		   (area->nr_pages*PAGE_SIZE), area->size);
-	vfree(area->addr);
+	__vfree(area->addr);
 	return NULL;
 }
 
 /**
- *	__vmalloc_node_range - allocate virtually contiguous memory
- *	@size: allocation size
- *	@align: desired alignment
- *	@start: vm area range start
- *	@end: vm area range end
- *	@gfp_mask: flags for the page level allocator
- *	@prot: protection mask for the allocated pages
- *	@vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
- *	@node: node to use for allocation or NUMA_NO_NODE
- *	@caller: caller's return address
+ * __vmalloc_node_range - allocate virtually contiguous memory
+ * @size: allocation size
+ * @align: desired alignment
+ * @start: vm area range start
+ * @end: vm area range end
+ * @gfp_mask: flags for the page level allocator
+ * @prot: protection mask for the allocated pages
+ * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
+ * @node: node to use for allocation or NUMA_NO_NODE
+ * @caller: caller's return address
  *
- *	Allocate enough pages to cover @size from the page level
- *	allocator with @gfp_mask flags. Map them into contiguous
- *	kernel virtual space, using a pagetable protection of @prot.
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags. Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
+ *
+ * Return: the address of the area or %NULL on failure
  */
 void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
@@ -1768,25 +1789,35 @@ fail:
 	return NULL;
 }
 
+/*
+ * This is only for performance analysis of vmalloc and stress purposes.
+ * It is required by the vmalloc test module; do not use it elsewhere.
+ */
+#ifdef CONFIG_TEST_VMALLOC_MODULE
+EXPORT_SYMBOL_GPL(__vmalloc_node_range);
+#endif
+
 /**
- *	__vmalloc_node - allocate virtually contiguous memory
- *	@size: allocation size
- *	@align: desired alignment
- *	@gfp_mask: flags for the page level allocator
- *	@prot: protection mask for the allocated pages
- *	@node: node to use for allocation or NUMA_NO_NODE
- *	@caller: caller's return address
+ * __vmalloc_node - allocate virtually contiguous memory
+ * @size: allocation size
+ * @align: desired alignment
+ * @gfp_mask: flags for the page level allocator
+ * @prot: protection mask for the allocated pages
+ * @node: node to use for allocation or NUMA_NO_NODE
+ * @caller: caller's return address
  *
- *	Allocate enough pages to cover @size from the page level
- *	allocator with @gfp_mask flags. Map them into contiguous
- *	kernel virtual space, using a pagetable protection of @prot.
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags. Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
  *
- *	Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
- *	and __GFP_NOFAIL are not supported
+ * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
+ * and __GFP_NOFAIL are not supported
  *
- *	Any use of gfp flags outside of GFP_KERNEL should be consulted
- *	with mm people.
+ * Any use of gfp flags outside of GFP_KERNEL should be consulted
+ * with mm people.
  *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, pgprot_t prot,
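
Note: CONFIG_TEST_VMALLOC_MODULE belongs to the vmalloc stress/performance
test module introduced alongside this change (lib/test_vmalloc.c). A
minimal sketch of the kind of direct call the export enables, with
illustrative values only:

	void *p;

	p = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
				 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
				 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p)
		vfree(p);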
@@ -1818,13 +1849,16 @@ void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
 }
 
 /**
- *	vmalloc - allocate virtually contiguous memory
- *	@size: allocation size
- *	Allocate enough pages to cover @size from the page level
- *	allocator and map them into contiguous kernel virtual space.
+ * vmalloc - allocate virtually contiguous memory
+ * @size: allocation size
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
  *
- *	For tight control over page level allocator and protection flags
- *	use __vmalloc() instead.
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc(unsigned long size)
 {
@@ -1834,14 +1868,17 @@ void *vmalloc(unsigned long size)
 EXPORT_SYMBOL(vmalloc);
 
 /**
- *	vzalloc - allocate virtually contiguous memory with zero fill
- *	@size: allocation size
- *	Allocate enough pages to cover @size from the page level
- *	allocator and map them into contiguous kernel virtual space.
- *	The memory allocated is set to zero.
- *
- *	For tight control over page level allocator and protection flags
- *	use __vmalloc() instead.
+ * vzalloc - allocate virtually contiguous memory with zero fill
+ * @size: allocation size
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vzalloc(unsigned long size)
 {
@@ -1856,34 +1893,30 @@ EXPORT_SYMBOL(vzalloc);
  *
- *	The resulting memory area is zeroed so it can be mapped to userspace
- *	without leaking data.
+ * The resulting memory area is zeroed so it can be mapped to userspace
+ * without leaking data.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc_user(unsigned long size)
 {
-	struct vm_struct *area;
-	void *ret;
-
-	ret = __vmalloc_node(size, SHMLBA,
-			     GFP_KERNEL | __GFP_ZERO,
-			     PAGE_KERNEL, NUMA_NO_NODE,
-			     __builtin_return_address(0));
-	if (ret) {
-		area = find_vm_area(ret);
-		area->flags |= VM_USERMAP;
-	}
-	return ret;
+	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
+				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
+				    VM_USERMAP, NUMA_NO_NODE,
+				    __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_user);
 
 /**
- *	vmalloc_node - allocate memory on a specific node
- *	@size: allocation size
- *	@node: numa node
+ * vmalloc_node - allocate memory on a specific node
+ * @size: allocation size
+ * @node: numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
  *
- *	Allocate enough pages to cover @size from the page level
- *	allocator and map them into contiguous kernel virtual space.
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
  *
- *	For tight control over page level allocator and protection flags
- *	use __vmalloc() instead.
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc_node(unsigned long size, int node)
 {
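
Note: the vmalloc_user() rewrite is more than a cleanup. Previously the
area was created first and VM_USERMAP was ORed in only after an extra
find_vm_area() lookup, leaving a window where the area existed without the
flag. Passing VM_USERMAP straight to __vmalloc_node_range() sets it at
creation time, and that flag is what the check in
remap_vmalloc_range_partial() further down relies on:

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

vmalloc_32_user() receives the same conversion below.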
@@ -1903,6 +1936,8 @@ EXPORT_SYMBOL(vmalloc_node);
  *
  * For tight control over page level allocator and protection flags
  * use __vmalloc_node() instead.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vzalloc_node(unsigned long size, int node)
 {
@@ -1912,17 +1947,18 @@ void *vzalloc_node(unsigned long size, int node)
 EXPORT_SYMBOL(vzalloc_node);
 
 /**
- *	vmalloc_exec - allocate virtually contiguous, executable memory
- *	@size: allocation size
+ * vmalloc_exec - allocate virtually contiguous, executable memory
+ * @size: allocation size
  *
- *	Kernel-internal function to allocate enough pages to cover @size
- *	the page level allocator and map them into contiguous and
- *	executable kernel virtual space.
+ * Kernel-internal function to allocate enough pages to cover @size
+ * from the page level allocator and map them into contiguous and
+ * executable kernel virtual space.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
  *
- *	For tight control over page level allocator and protection flags
- *	use __vmalloc() instead.
+ * Return: pointer to the allocated memory or %NULL on error
  */
-
 void *vmalloc_exec(unsigned long size)
 {
 	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
@@ -1942,11 +1978,13 @@ void *vmalloc_exec(unsigned long size)
 #endif
 
 /**
- *	vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
- *	@size: allocation size
+ * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
+ * @size: allocation size
  *
- *	Allocate enough 32bit PA addressable pages to cover @size from the
- *	page level allocator and map them into contiguous kernel virtual space.
+ * Allocate enough 32bit PA addressable pages to cover @size from the
+ * page level allocator and map them into contiguous kernel virtual space.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc_32(unsigned long size)
 {
@@ -1957,23 +1995,19 @@ EXPORT_SYMBOL(vmalloc_32);
 
 /**
- *	vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
- *	@size: allocation size
+ * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
+ * @size: allocation size
  *
- *	The resulting memory area is 32bit addressable and zeroed so it can be
- *	mapped to userspace without leaking data.
+ * The resulting memory area is 32bit addressable and zeroed so it can be
+ * mapped to userspace without leaking data.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
  */
 void *vmalloc_32_user(unsigned long size)
 {
-	struct vm_struct *area;
-	void *ret;
-
-	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
-			     NUMA_NO_NODE, __builtin_return_address(0));
-	if (ret) {
-		area = find_vm_area(ret);
-		area->flags |= VM_USERMAP;
-	}
-	return ret;
+	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
+				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+				    VM_USERMAP, NUMA_NO_NODE,
+				    __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32_user);
 
@@ -2059,31 +2093,29 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
 }
 
 /**
- *	vread() - read vmalloc area in a safe way.
- *	@buf: buffer for reading data
- *	@addr: vm address.
- *	@count: number of bytes to be read.
- *
- *	Returns # of bytes which addr and buf should be increased.
- *	(same number to @count). Returns 0 if [addr...addr+count) doesn't
- *	includes any intersect with alive vmalloc area.
- *
- *	This function checks that addr is a valid vmalloc'ed area, and
- *	copy data from that area to a given buffer. If the given memory range
- *	of [addr...addr+count) includes some valid address, data is copied to
- *	proper area of @buf. If there are memory holes, they'll be zero-filled.
- *	IOREMAP area is treated as memory hole and no copy is done.
- *
- *	If [addr...addr+count) doesn't includes any intersects with alive
- *	vm_struct area, returns 0. @buf should be kernel's buffer.
- *
- *	Note: In usual ops, vread() is never necessary because the caller
- *	should know vmalloc() area is valid and can use memcpy().
- *	This is for routines which have to access vmalloc area without
- *	any informaion, as /dev/kmem.
- *
+ * vread() - read vmalloc area in a safe way.
+ * @buf: buffer for reading data
+ * @addr: vm address.
+ * @count: number of bytes to be read.
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * copies data from that area to a given buffer. If the given memory range
+ * of [addr...addr+count) includes some valid address, data is copied to
+ * the proper area of @buf. If there are memory holes, they'll be
+ * zero-filled. An IOREMAP area is treated as a memory hole and no copy
+ * is done.
+ *
+ * If [addr...addr+count) doesn't include any intersection with a live
+ * vm_struct area, this returns 0. @buf should be a kernel buffer.
+ *
+ * Note: In usual ops, vread() is never necessary because the caller
+ * should know the vmalloc() area is valid and can use memcpy().
+ * This is for routines which have to access the vmalloc area without
+ * any information, such as /dev/kmem.
+ *
+ * Return: number of bytes for which addr and buf should be increased
+ * (same number as @count) or %0 if [addr...addr+count) doesn't
+ * include any intersection with a valid vmalloc area
  */
-
 long vread(char *buf, char *addr, unsigned long count)
 {
 	struct vmap_area *va;
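
Note: a short usage sketch of the contract the reworked comment describes
(illustrative only; kbuf and vaddr are hypothetical):

	char *kbuf = kmalloc(count, GFP_KERNEL);
	long n = vread(kbuf, vaddr, count);

	/* n == count if any byte of [vaddr, vaddr + count) was in a live
	 * vmalloc area (holes come back zero-filled); n == 0 otherwise. */

vwrite() below follows the same return convention in the other direction.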
@@ -2140,31 +2172,29 @@ finished:
 }
 
 /**
- *	vwrite() - write vmalloc area in a safe way.
- *	@buf: buffer for source data
- *	@addr: vm address.
- *	@count: number of bytes to be read.
- *
- *	Returns # of bytes which addr and buf should be incresed.
- *	(same number to @count).
- *	If [addr...addr+count) doesn't includes any intersect with valid
- *	vmalloc area, returns 0.
- *
- *	This function checks that addr is a valid vmalloc'ed area, and
- *	copy data from a buffer to the given addr. If specified range of
- *	[addr...addr+count) includes some valid address, data is copied from
- *	proper area of @buf. If there are memory holes, no copy to hole.
- *	IOREMAP area is treated as memory hole and no copy is done.
- *
- *	If [addr...addr+count) doesn't includes any intersects with alive
- *	vm_struct area, returns 0. @buf should be kernel's buffer.
- *
- *	Note: In usual ops, vwrite() is never necessary because the caller
- *	should know vmalloc() area is valid and can use memcpy().
- *	This is for routines which have to access vmalloc area without
- *	any informaion, as /dev/kmem.
+ * vwrite() - write vmalloc area in a safe way.
+ * @buf: buffer for source data
+ * @addr: vm address.
+ * @count: number of bytes to be written.
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * copies data from a buffer to the given addr. If the specified range of
+ * [addr...addr+count) includes some valid address, data is copied from
+ * the proper area of @buf. If there are memory holes, no copy is done
+ * for them. An IOREMAP area is treated as a memory hole.
+ *
+ * If [addr...addr+count) doesn't include any intersection with a live
+ * vm_struct area, this returns 0. @buf should be a kernel buffer.
+ *
+ * Note: In usual ops, vwrite() is never necessary because the caller
+ * should know the vmalloc() area is valid and can use memcpy().
+ * This is for routines which have to access the vmalloc area without
+ * any information, such as /dev/kmem.
+ *
+ * Return: number of bytes for which addr and buf should be
+ * increased (same number as @count) or %0 if [addr...addr+count)
+ * doesn't include any intersection with a valid vmalloc area
  */
-
 long vwrite(char *buf, char *addr, unsigned long count)
 {
 	struct vmap_area *va;
@@ -2216,20 +2246,20 @@ finished:
 }
 
 /**
- *	remap_vmalloc_range_partial - map vmalloc pages to userspace
- *	@vma: vma to cover
- *	@uaddr: target user address to start at
- *	@kaddr: virtual address of vmalloc kernel memory
- *	@size: size of map area
+ * remap_vmalloc_range_partial - map vmalloc pages to userspace
+ * @vma: vma to cover
+ * @uaddr: target user address to start at
+ * @kaddr: virtual address of vmalloc kernel memory
+ * @size: size of map area
  *
- *	Returns: 0 for success, -Exxx on failure
+ * Returns: 0 for success, -Exxx on failure
  *
- *	This function checks that @kaddr is a valid vmalloc'ed area,
- *	and that it is big enough to cover the range starting at
- *	@uaddr in @vma. Will return failure if that criteria isn't
- *	met.
+ * This function checks that @kaddr is a valid vmalloc'ed area,
+ * and that it is big enough to cover the range starting at
+ * @uaddr in @vma. Will return failure if that criterion isn't
+ * met.
  *
- *	Similar to remap_pfn_range() (see mm/memory.c)
+ * Similar to remap_pfn_range() (see mm/memory.c)
  */
 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
 				void *kaddr, unsigned long size)
@@ -2248,7 +2278,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
 	if (!(area->flags & VM_USERMAP))
 		return -EINVAL;
 
-	if (kaddr + size > area->addr + area->size)
+	if (kaddr + size > area->addr + get_vm_area_size(area))
 		return -EINVAL;
 
 	do {
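
Note: area->size includes the trailing guard page, so the old bound let the
final page of a maximal mapping land on the guard page. get_vm_area_size()
subtracts it; the helper in include/linux/vmalloc.h of this era looks
roughly like:

	static inline size_t get_vm_area_size(const struct vm_struct *area)
	{
		if (!(area->flags & VM_NO_GUARD))
			/* return actual size without guard page */
			return area->size - PAGE_SIZE;
		return area->size;
	}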
@@ -2271,18 +2301,18 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
 EXPORT_SYMBOL(remap_vmalloc_range_partial);
 
 /**
- *	remap_vmalloc_range - map vmalloc pages to userspace
- *	@vma: vma to cover (map full range of vma)
- *	@addr: vmalloc memory
- *	@pgoff: number of pages into addr before first page to map
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
  *
- *	Returns: 0 for success, -Exxx on failure
+ * Returns: 0 for success, -Exxx on failure
  *
- *	This function checks that addr is a valid vmalloc'ed area, and
- *	that it is big enough to cover the vma. Will return failure if
- *	that criteria isn't met.
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * that criterion isn't met.
  *
- *	Similar to remap_pfn_range() (see mm/memory.c)
+ * Similar to remap_pfn_range() (see mm/memory.c)
  */
 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 			unsigned long pgoff)
@@ -2314,18 +2344,18 @@ static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 }
 
 /**
- *	alloc_vm_area - allocate a range of kernel address space
- *	@size: size of the area
- *	@ptes: returns the PTEs for the address space
+ * alloc_vm_area - allocate a range of kernel address space
+ * @size: size of the area
+ * @ptes: returns the PTEs for the address space
  *
- *	Returns: NULL on failure, vm_struct on success
+ * Returns: NULL on failure, vm_struct on success
  *
- *	This function reserves a range of kernel address space, and
- *	allocates pagetables to map that range. No actual mappings
- *	are created.
+ * This function reserves a range of kernel address space, and
+ * allocates pagetables to map that range. No actual mappings
+ * are created.
  *
- *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
- *	allocated for the VM area are returned.
+ * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ * allocated for the VM area are returned.
  */
 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
@@ -2751,4 +2781,3 @@ static int __init proc_vmalloc_init(void)
 module_init(proc_vmalloc_init);
 
 #endif
-