diff options
author | Mike Rapoport <rppt@linux.ibm.com> | 2019-03-05 18:48:36 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 00:07:20 -0500 |
commit | 92eac16819e47ab919bd8f28ed49f8fadad0954e (patch) | |
tree | f647bda91e9b90d5279fa65ebb250ba69d42334a /mm/vmalloc.c | |
parent | 6d2bef9df7ccf3a2db0160be24f8b92a3f24708a (diff) |
docs/mm: vmalloc: re-indent kernel-doc comments
Some kernel-doc comments in mm/vmalloc.c have leading tab in
indentation. This leads to excessive indentation in the generated HTML
and to the inconsistency of its layout ([1] vs [2]).
Besides, multi-line Note: sections are not handled properly with extra
indentation.
[1] https://www.kernel.org/doc/html/v4.20/core-api/mm-api.html?#c.vm_map_ram
[2] https://www.kernel.org/doc/html/v4.20/core-api/mm-api.html?#c.vfree
Link: http://lkml.kernel.org/r/1549549644-4903-2-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r-- | mm/vmalloc.c | 367 |
1 files changed, 182 insertions, 185 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 77006fa1a90b..03cbba890301 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1191,6 +1191,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro | |||
1191 | EXPORT_SYMBOL(vm_map_ram); | 1191 | EXPORT_SYMBOL(vm_map_ram); |
1192 | 1192 | ||
1193 | static struct vm_struct *vmlist __initdata; | 1193 | static struct vm_struct *vmlist __initdata; |
1194 | |||
1194 | /** | 1195 | /** |
1195 | * vm_area_add_early - add vmap area early during boot | 1196 | * vm_area_add_early - add vmap area early during boot |
1196 | * @vm: vm_struct to add | 1197 | * @vm: vm_struct to add |
@@ -1425,13 +1426,13 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, | |||
1425 | } | 1426 | } |
1426 | 1427 | ||
1427 | /** | 1428 | /** |
1428 | * get_vm_area - reserve a contiguous kernel virtual area | 1429 | * get_vm_area - reserve a contiguous kernel virtual area |
1429 | * @size: size of the area | 1430 | * @size: size of the area |
1430 | * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC | 1431 | * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC |
1431 | * | 1432 | * |
1432 | * Search an area of @size in the kernel virtual mapping area, | 1433 | * Search an area of @size in the kernel virtual mapping area, |
1433 | * and reserved it for out purposes. Returns the area descriptor | 1434 | * and reserved it for out purposes. Returns the area descriptor |
1434 | * on success or %NULL on failure. | 1435 | * on success or %NULL on failure. |
1435 | */ | 1436 | */ |
1436 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) | 1437 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) |
1437 | { | 1438 | { |
@@ -1448,12 +1449,12 @@ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, | |||
1448 | } | 1449 | } |
1449 | 1450 | ||
1450 | /** | 1451 | /** |
1451 | * find_vm_area - find a continuous kernel virtual area | 1452 | * find_vm_area - find a continuous kernel virtual area |
1452 | * @addr: base address | 1453 | * @addr: base address |
1453 | * | 1454 | * |
1454 | * Search for the kernel VM area starting at @addr, and return it. | 1455 | * Search for the kernel VM area starting at @addr, and return it. |
1455 | * It is up to the caller to do all required locking to keep the returned | 1456 | * It is up to the caller to do all required locking to keep the returned |
1456 | * pointer valid. | 1457 | * pointer valid. |
1457 | */ | 1458 | */ |
1458 | struct vm_struct *find_vm_area(const void *addr) | 1459 | struct vm_struct *find_vm_area(const void *addr) |
1459 | { | 1460 | { |
@@ -1467,12 +1468,12 @@ struct vm_struct *find_vm_area(const void *addr) | |||
1467 | } | 1468 | } |
1468 | 1469 | ||
1469 | /** | 1470 | /** |
1470 | * remove_vm_area - find and remove a continuous kernel virtual area | 1471 | * remove_vm_area - find and remove a continuous kernel virtual area |
1471 | * @addr: base address | 1472 | * @addr: base address |
1472 | * | 1473 | * |
1473 | * Search for the kernel VM area starting at @addr, and remove it. | 1474 | * Search for the kernel VM area starting at @addr, and remove it. |
1474 | * This function returns the found VM area, but using it is NOT safe | 1475 | * This function returns the found VM area, but using it is NOT safe |
1475 | * on SMP machines, except for its size or flags. | 1476 | * on SMP machines, except for its size or flags. |
1476 | */ | 1477 | */ |
1477 | struct vm_struct *remove_vm_area(const void *addr) | 1478 | struct vm_struct *remove_vm_area(const void *addr) |
1478 | { | 1479 | { |
@@ -1552,11 +1553,11 @@ static inline void __vfree_deferred(const void *addr) | |||
1552 | } | 1553 | } |
1553 | 1554 | ||
1554 | /** | 1555 | /** |
1555 | * vfree_atomic - release memory allocated by vmalloc() | 1556 | * vfree_atomic - release memory allocated by vmalloc() |
1556 | * @addr: memory base address | 1557 | * @addr: memory base address |
1557 | * | 1558 | * |
1558 | * This one is just like vfree() but can be called in any atomic context | 1559 | * This one is just like vfree() but can be called in any atomic context |
1559 | * except NMIs. | 1560 | * except NMIs. |
1560 | */ | 1561 | */ |
1561 | void vfree_atomic(const void *addr) | 1562 | void vfree_atomic(const void *addr) |
1562 | { | 1563 | { |
@@ -1578,20 +1579,20 @@ static void __vfree(const void *addr) | |||
1578 | } | 1579 | } |
1579 | 1580 | ||
1580 | /** | 1581 | /** |
1581 | * vfree - release memory allocated by vmalloc() | 1582 | * vfree - release memory allocated by vmalloc() |
1582 | * @addr: memory base address | 1583 | * @addr: memory base address |
1583 | * | 1584 | * |
1584 | * Free the virtually continuous memory area starting at @addr, as | 1585 | * Free the virtually continuous memory area starting at @addr, as |
1585 | * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is | 1586 | * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is |
1586 | * NULL, no operation is performed. | 1587 | * NULL, no operation is performed. |
1587 | * | 1588 | * |
1588 | * Must not be called in NMI context (strictly speaking, only if we don't | 1589 | * Must not be called in NMI context (strictly speaking, only if we don't |
1589 | * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling | 1590 | * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling |
1590 | * conventions for vfree() arch-depenedent would be a really bad idea) | 1591 | * conventions for vfree() arch-depenedent would be a really bad idea) |
1591 | * | 1592 | * |
1592 | * May sleep if called *not* from interrupt context. | 1593 | * May sleep if called *not* from interrupt context. |
1593 | * | 1594 | * |
1594 | * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node) | 1595 | * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node) |
1595 | */ | 1596 | */ |
1596 | void vfree(const void *addr) | 1597 | void vfree(const void *addr) |
1597 | { | 1598 | { |
@@ -1609,13 +1610,13 @@ void vfree(const void *addr) | |||
1609 | EXPORT_SYMBOL(vfree); | 1610 | EXPORT_SYMBOL(vfree); |
1610 | 1611 | ||
1611 | /** | 1612 | /** |
1612 | * vunmap - release virtual mapping obtained by vmap() | 1613 | * vunmap - release virtual mapping obtained by vmap() |
1613 | * @addr: memory base address | 1614 | * @addr: memory base address |
1614 | * | 1615 | * |
1615 | * Free the virtually contiguous memory area starting at @addr, | 1616 | * Free the virtually contiguous memory area starting at @addr, |
1616 | * which was created from the page array passed to vmap(). | 1617 | * which was created from the page array passed to vmap(). |
1617 | * | 1618 | * |
1618 | * Must not be called in interrupt context. | 1619 | * Must not be called in interrupt context. |
1619 | */ | 1620 | */ |
1620 | void vunmap(const void *addr) | 1621 | void vunmap(const void *addr) |
1621 | { | 1622 | { |
@@ -1627,17 +1628,17 @@ void vunmap(const void *addr) | |||
1627 | EXPORT_SYMBOL(vunmap); | 1628 | EXPORT_SYMBOL(vunmap); |
1628 | 1629 | ||
1629 | /** | 1630 | /** |
1630 | * vmap - map an array of pages into virtually contiguous space | 1631 | * vmap - map an array of pages into virtually contiguous space |
1631 | * @pages: array of page pointers | 1632 | * @pages: array of page pointers |
1632 | * @count: number of pages to map | 1633 | * @count: number of pages to map |
1633 | * @flags: vm_area->flags | 1634 | * @flags: vm_area->flags |
1634 | * @prot: page protection for the mapping | 1635 | * @prot: page protection for the mapping |
1635 | * | 1636 | * |
1636 | * Maps @count pages from @pages into contiguous kernel virtual | 1637 | * Maps @count pages from @pages into contiguous kernel virtual |
1637 | * space. | 1638 | * space. |
1638 | */ | 1639 | */ |
1639 | void *vmap(struct page **pages, unsigned int count, | 1640 | void *vmap(struct page **pages, unsigned int count, |
1640 | unsigned long flags, pgprot_t prot) | 1641 | unsigned long flags, pgprot_t prot) |
1641 | { | 1642 | { |
1642 | struct vm_struct *area; | 1643 | struct vm_struct *area; |
1643 | unsigned long size; /* In bytes */ | 1644 | unsigned long size; /* In bytes */ |
@@ -1724,20 +1725,20 @@ fail: | |||
1724 | } | 1725 | } |
1725 | 1726 | ||
1726 | /** | 1727 | /** |
1727 | * __vmalloc_node_range - allocate virtually contiguous memory | 1728 | * __vmalloc_node_range - allocate virtually contiguous memory |
1728 | * @size: allocation size | 1729 | * @size: allocation size |
1729 | * @align: desired alignment | 1730 | * @align: desired alignment |
1730 | * @start: vm area range start | 1731 | * @start: vm area range start |
1731 | * @end: vm area range end | 1732 | * @end: vm area range end |
1732 | * @gfp_mask: flags for the page level allocator | 1733 | * @gfp_mask: flags for the page level allocator |
1733 | * @prot: protection mask for the allocated pages | 1734 | * @prot: protection mask for the allocated pages |
1734 | * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) | 1735 | * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) |
1735 | * @node: node to use for allocation or NUMA_NO_NODE | 1736 | * @node: node to use for allocation or NUMA_NO_NODE |
1736 | * @caller: caller's return address | 1737 | * @caller: caller's return address |
1737 | * | 1738 | * |
1738 | * Allocate enough pages to cover @size from the page level | 1739 | * Allocate enough pages to cover @size from the page level |
1739 | * allocator with @gfp_mask flags. Map them into contiguous | 1740 | * allocator with @gfp_mask flags. Map them into contiguous |
1740 | * kernel virtual space, using a pagetable protection of @prot. | 1741 | * kernel virtual space, using a pagetable protection of @prot. |
1741 | */ | 1742 | */ |
1742 | void *__vmalloc_node_range(unsigned long size, unsigned long align, | 1743 | void *__vmalloc_node_range(unsigned long size, unsigned long align, |
1743 | unsigned long start, unsigned long end, gfp_t gfp_mask, | 1744 | unsigned long start, unsigned long end, gfp_t gfp_mask, |
@@ -1788,24 +1789,23 @@ EXPORT_SYMBOL_GPL(__vmalloc_node_range); | |||
1788 | #endif | 1789 | #endif |
1789 | 1790 | ||
1790 | /** | 1791 | /** |
1791 | * __vmalloc_node - allocate virtually contiguous memory | 1792 | * __vmalloc_node - allocate virtually contiguous memory |
1792 | * @size: allocation size | 1793 | * @size: allocation size |
1793 | * @align: desired alignment | 1794 | * @align: desired alignment |
1794 | * @gfp_mask: flags for the page level allocator | 1795 | * @gfp_mask: flags for the page level allocator |
1795 | * @prot: protection mask for the allocated pages | 1796 | * @prot: protection mask for the allocated pages |
1796 | * @node: node to use for allocation or NUMA_NO_NODE | 1797 | * @node: node to use for allocation or NUMA_NO_NODE |
1797 | * @caller: caller's return address | 1798 | * @caller: caller's return address |
1798 | * | ||
1799 | * Allocate enough pages to cover @size from the page level | ||
1800 | * allocator with @gfp_mask flags. Map them into contiguous | ||
1801 | * kernel virtual space, using a pagetable protection of @prot. | ||
1802 | * | 1799 | * |
1803 | * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL | 1800 | * Allocate enough pages to cover @size from the page level |
1804 | * and __GFP_NOFAIL are not supported | 1801 | * allocator with @gfp_mask flags. Map them into contiguous |
1802 | * kernel virtual space, using a pagetable protection of @prot. | ||
1805 | * | 1803 | * |
1806 | * Any use of gfp flags outside of GFP_KERNEL should be consulted | 1804 | * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL |
1807 | * with mm people. | 1805 | * and __GFP_NOFAIL are not supported |
1808 | * | 1806 | * |
1807 | * Any use of gfp flags outside of GFP_KERNEL should be consulted | ||
1808 | * with mm people. | ||
1809 | */ | 1809 | */ |
1810 | static void *__vmalloc_node(unsigned long size, unsigned long align, | 1810 | static void *__vmalloc_node(unsigned long size, unsigned long align, |
1811 | gfp_t gfp_mask, pgprot_t prot, | 1811 | gfp_t gfp_mask, pgprot_t prot, |
@@ -1837,13 +1837,14 @@ void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, | |||
1837 | } | 1837 | } |
1838 | 1838 | ||
1839 | /** | 1839 | /** |
1840 | * vmalloc - allocate virtually contiguous memory | 1840 | * vmalloc - allocate virtually contiguous memory |
1841 | * @size: allocation size | 1841 | * @size: allocation size |
1842 | * Allocate enough pages to cover @size from the page level | 1842 | * |
1843 | * allocator and map them into contiguous kernel virtual space. | 1843 | * Allocate enough pages to cover @size from the page level |
1844 | * allocator and map them into contiguous kernel virtual space. | ||
1844 | * | 1845 | * |
1845 | * For tight control over page level allocator and protection flags | 1846 | * For tight control over page level allocator and protection flags |
1846 | * use __vmalloc() instead. | 1847 | * use __vmalloc() instead. |
1847 | */ | 1848 | */ |
1848 | void *vmalloc(unsigned long size) | 1849 | void *vmalloc(unsigned long size) |
1849 | { | 1850 | { |
@@ -1853,14 +1854,15 @@ void *vmalloc(unsigned long size) | |||
1853 | EXPORT_SYMBOL(vmalloc); | 1854 | EXPORT_SYMBOL(vmalloc); |
1854 | 1855 | ||
1855 | /** | 1856 | /** |
1856 | * vzalloc - allocate virtually contiguous memory with zero fill | 1857 | * vzalloc - allocate virtually contiguous memory with zero fill |
1857 | * @size: allocation size | 1858 | * @size: allocation size |
1858 | * Allocate enough pages to cover @size from the page level | 1859 | * |
1859 | * allocator and map them into contiguous kernel virtual space. | 1860 | * Allocate enough pages to cover @size from the page level |
1860 | * The memory allocated is set to zero. | 1861 | * allocator and map them into contiguous kernel virtual space. |
1861 | * | 1862 | * The memory allocated is set to zero. |
1862 | * For tight control over page level allocator and protection flags | 1863 | * |
1863 | * use __vmalloc() instead. | 1864 | * For tight control over page level allocator and protection flags |
1865 | * use __vmalloc() instead. | ||
1864 | */ | 1866 | */ |
1865 | void *vzalloc(unsigned long size) | 1867 | void *vzalloc(unsigned long size) |
1866 | { | 1868 | { |
@@ -1886,15 +1888,15 @@ void *vmalloc_user(unsigned long size) | |||
1886 | EXPORT_SYMBOL(vmalloc_user); | 1888 | EXPORT_SYMBOL(vmalloc_user); |
1887 | 1889 | ||
1888 | /** | 1890 | /** |
1889 | * vmalloc_node - allocate memory on a specific node | 1891 | * vmalloc_node - allocate memory on a specific node |
1890 | * @size: allocation size | 1892 | * @size: allocation size |
1891 | * @node: numa node | 1893 | * @node: numa node |
1892 | * | 1894 | * |
1893 | * Allocate enough pages to cover @size from the page level | 1895 | * Allocate enough pages to cover @size from the page level |
1894 | * allocator and map them into contiguous kernel virtual space. | 1896 | * allocator and map them into contiguous kernel virtual space. |
1895 | * | 1897 | * |
1896 | * For tight control over page level allocator and protection flags | 1898 | * For tight control over page level allocator and protection flags |
1897 | * use __vmalloc() instead. | 1899 | * use __vmalloc() instead. |
1898 | */ | 1900 | */ |
1899 | void *vmalloc_node(unsigned long size, int node) | 1901 | void *vmalloc_node(unsigned long size, int node) |
1900 | { | 1902 | { |
@@ -1923,17 +1925,16 @@ void *vzalloc_node(unsigned long size, int node) | |||
1923 | EXPORT_SYMBOL(vzalloc_node); | 1925 | EXPORT_SYMBOL(vzalloc_node); |
1924 | 1926 | ||
1925 | /** | 1927 | /** |
1926 | * vmalloc_exec - allocate virtually contiguous, executable memory | 1928 | * vmalloc_exec - allocate virtually contiguous, executable memory |
1927 | * @size: allocation size | 1929 | * @size: allocation size |
1928 | * | 1930 | * |
1929 | * Kernel-internal function to allocate enough pages to cover @size | 1931 | * Kernel-internal function to allocate enough pages to cover @size |
1930 | * the page level allocator and map them into contiguous and | 1932 | * the page level allocator and map them into contiguous and |
1931 | * executable kernel virtual space. | 1933 | * executable kernel virtual space. |
1932 | * | 1934 | * |
1933 | * For tight control over page level allocator and protection flags | 1935 | * For tight control over page level allocator and protection flags |
1934 | * use __vmalloc() instead. | 1936 | * use __vmalloc() instead. |
1935 | */ | 1937 | */ |
1936 | |||
1937 | void *vmalloc_exec(unsigned long size) | 1938 | void *vmalloc_exec(unsigned long size) |
1938 | { | 1939 | { |
1939 | return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC, | 1940 | return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC, |
@@ -1953,11 +1954,11 @@ void *vmalloc_exec(unsigned long size) | |||
1953 | #endif | 1954 | #endif |
1954 | 1955 | ||
1955 | /** | 1956 | /** |
1956 | * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) | 1957 | * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) |
1957 | * @size: allocation size | 1958 | * @size: allocation size |
1958 | * | 1959 | * |
1959 | * Allocate enough 32bit PA addressable pages to cover @size from the | 1960 | * Allocate enough 32bit PA addressable pages to cover @size from the |
1960 | * page level allocator and map them into contiguous kernel virtual space. | 1961 | * page level allocator and map them into contiguous kernel virtual space. |
1961 | */ | 1962 | */ |
1962 | void *vmalloc_32(unsigned long size) | 1963 | void *vmalloc_32(unsigned long size) |
1963 | { | 1964 | { |
@@ -1968,7 +1969,7 @@ EXPORT_SYMBOL(vmalloc_32); | |||
1968 | 1969 | ||
1969 | /** | 1970 | /** |
1970 | * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory | 1971 | * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory |
1971 | * @size: allocation size | 1972 | * @size: allocation size |
1972 | * | 1973 | * |
1973 | * The resulting memory area is 32bit addressable and zeroed so it can be | 1974 | * The resulting memory area is 32bit addressable and zeroed so it can be |
1974 | * mapped to userspace without leaking data. | 1975 | * mapped to userspace without leaking data. |
@@ -2064,31 +2065,29 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count) | |||
2064 | } | 2065 | } |
2065 | 2066 | ||
2066 | /** | 2067 | /** |
2067 | * vread() - read vmalloc area in a safe way. | 2068 | * vread() - read vmalloc area in a safe way. |
2068 | * @buf: buffer for reading data | 2069 | * @buf: buffer for reading data |
2069 | * @addr: vm address. | 2070 | * @addr: vm address. |
2070 | * @count: number of bytes to be read. | 2071 | * @count: number of bytes to be read. |
2071 | * | 2072 | * |
2072 | * Returns # of bytes which addr and buf should be increased. | 2073 | * Returns # of bytes which addr and buf should be increased. |
2073 | * (same number to @count). Returns 0 if [addr...addr+count) doesn't | 2074 | * (same number to @count). Returns 0 if [addr...addr+count) doesn't |
2074 | * includes any intersect with alive vmalloc area. | 2075 | * includes any intersect with alive vmalloc area. |
2075 | * | 2076 | * |
2076 | * This function checks that addr is a valid vmalloc'ed area, and | 2077 | * This function checks that addr is a valid vmalloc'ed area, and |
2077 | * copy data from that area to a given buffer. If the given memory range | 2078 | * copy data from that area to a given buffer. If the given memory range |
2078 | * of [addr...addr+count) includes some valid address, data is copied to | 2079 | * of [addr...addr+count) includes some valid address, data is copied to |
2079 | * proper area of @buf. If there are memory holes, they'll be zero-filled. | 2080 | * proper area of @buf. If there are memory holes, they'll be zero-filled. |
2080 | * IOREMAP area is treated as memory hole and no copy is done. | 2081 | * IOREMAP area is treated as memory hole and no copy is done. |
2081 | * | 2082 | * |
2082 | * If [addr...addr+count) doesn't includes any intersects with alive | 2083 | * If [addr...addr+count) doesn't includes any intersects with alive |
2083 | * vm_struct area, returns 0. @buf should be kernel's buffer. | 2084 | * vm_struct area, returns 0. @buf should be kernel's buffer. |
2084 | * | 2085 | * |
2085 | * Note: In usual ops, vread() is never necessary because the caller | 2086 | * Note: In usual ops, vread() is never necessary because the caller |
2086 | * should know vmalloc() area is valid and can use memcpy(). | 2087 | * should know vmalloc() area is valid and can use memcpy(). |
2087 | * This is for routines which have to access vmalloc area without | 2088 | * This is for routines which have to access vmalloc area without |
2088 | * any informaion, as /dev/kmem. | 2089 | * any informaion, as /dev/kmem. |
2089 | * | ||
2090 | */ | 2090 | */ |
2091 | |||
2092 | long vread(char *buf, char *addr, unsigned long count) | 2091 | long vread(char *buf, char *addr, unsigned long count) |
2093 | { | 2092 | { |
2094 | struct vmap_area *va; | 2093 | struct vmap_area *va; |
@@ -2145,31 +2144,30 @@ finished: | |||
2145 | } | 2144 | } |
2146 | 2145 | ||
2147 | /** | 2146 | /** |
2148 | * vwrite() - write vmalloc area in a safe way. | 2147 | * vwrite() - write vmalloc area in a safe way. |
2149 | * @buf: buffer for source data | 2148 | * @buf: buffer for source data |
2150 | * @addr: vm address. | 2149 | * @addr: vm address. |
2151 | * @count: number of bytes to be read. | 2150 | * @count: number of bytes to be read. |
2152 | * | 2151 | * |
2153 | * Returns # of bytes which addr and buf should be incresed. | 2152 | * Returns # of bytes which addr and buf should be incresed. |
2154 | * (same number to @count). | 2153 | * (same number to @count). |
2155 | * If [addr...addr+count) doesn't includes any intersect with valid | 2154 | * If [addr...addr+count) doesn't includes any intersect with valid |
2156 | * vmalloc area, returns 0. | 2155 | * vmalloc area, returns 0. |
2157 | * | 2156 | * |
2158 | * This function checks that addr is a valid vmalloc'ed area, and | 2157 | * This function checks that addr is a valid vmalloc'ed area, and |
2159 | * copy data from a buffer to the given addr. If specified range of | 2158 | * copy data from a buffer to the given addr. If specified range of |
2160 | * [addr...addr+count) includes some valid address, data is copied from | 2159 | * [addr...addr+count) includes some valid address, data is copied from |
2161 | * proper area of @buf. If there are memory holes, no copy to hole. | 2160 | * proper area of @buf. If there are memory holes, no copy to hole. |
2162 | * IOREMAP area is treated as memory hole and no copy is done. | 2161 | * IOREMAP area is treated as memory hole and no copy is done. |
2163 | * | 2162 | * |
2164 | * If [addr...addr+count) doesn't includes any intersects with alive | 2163 | * If [addr...addr+count) doesn't includes any intersects with alive |
2165 | * vm_struct area, returns 0. @buf should be kernel's buffer. | 2164 | * vm_struct area, returns 0. @buf should be kernel's buffer. |
2166 | * | 2165 | * |
2167 | * Note: In usual ops, vwrite() is never necessary because the caller | 2166 | * Note: In usual ops, vwrite() is never necessary because the caller |
2168 | * should know vmalloc() area is valid and can use memcpy(). | 2167 | * should know vmalloc() area is valid and can use memcpy(). |
2169 | * This is for routines which have to access vmalloc area without | 2168 | * This is for routines which have to access vmalloc area without |
2170 | * any informaion, as /dev/kmem. | 2169 | * any informaion, as /dev/kmem. |
2171 | */ | 2170 | */ |
2172 | |||
2173 | long vwrite(char *buf, char *addr, unsigned long count) | 2171 | long vwrite(char *buf, char *addr, unsigned long count) |
2174 | { | 2172 | { |
2175 | struct vmap_area *va; | 2173 | struct vmap_area *va; |
@@ -2221,20 +2219,20 @@ finished: | |||
2221 | } | 2219 | } |
2222 | 2220 | ||
2223 | /** | 2221 | /** |
2224 | * remap_vmalloc_range_partial - map vmalloc pages to userspace | 2222 | * remap_vmalloc_range_partial - map vmalloc pages to userspace |
2225 | * @vma: vma to cover | 2223 | * @vma: vma to cover |
2226 | * @uaddr: target user address to start at | 2224 | * @uaddr: target user address to start at |
2227 | * @kaddr: virtual address of vmalloc kernel memory | 2225 | * @kaddr: virtual address of vmalloc kernel memory |
2228 | * @size: size of map area | 2226 | * @size: size of map area |
2229 | * | 2227 | * |
2230 | * Returns: 0 for success, -Exxx on failure | 2228 | * Returns: 0 for success, -Exxx on failure |
2231 | * | 2229 | * |
2232 | * This function checks that @kaddr is a valid vmalloc'ed area, | 2230 | * This function checks that @kaddr is a valid vmalloc'ed area, |
2233 | * and that it is big enough to cover the range starting at | 2231 | * and that it is big enough to cover the range starting at |
2234 | * @uaddr in @vma. Will return failure if that criteria isn't | 2232 | * @uaddr in @vma. Will return failure if that criteria isn't |
2235 | * met. | 2233 | * met. |
2236 | * | 2234 | * |
2237 | * Similar to remap_pfn_range() (see mm/memory.c) | 2235 | * Similar to remap_pfn_range() (see mm/memory.c) |
2238 | */ | 2236 | */ |
2239 | int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, | 2237 | int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, |
2240 | void *kaddr, unsigned long size) | 2238 | void *kaddr, unsigned long size) |
@@ -2276,18 +2274,18 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, | |||
2276 | EXPORT_SYMBOL(remap_vmalloc_range_partial); | 2274 | EXPORT_SYMBOL(remap_vmalloc_range_partial); |
2277 | 2275 | ||
2278 | /** | 2276 | /** |
2279 | * remap_vmalloc_range - map vmalloc pages to userspace | 2277 | * remap_vmalloc_range - map vmalloc pages to userspace |
2280 | * @vma: vma to cover (map full range of vma) | 2278 | * @vma: vma to cover (map full range of vma) |
2281 | * @addr: vmalloc memory | 2279 | * @addr: vmalloc memory |
2282 | * @pgoff: number of pages into addr before first page to map | 2280 | * @pgoff: number of pages into addr before first page to map |
2283 | * | 2281 | * |
2284 | * Returns: 0 for success, -Exxx on failure | 2282 | * Returns: 0 for success, -Exxx on failure |
2285 | * | 2283 | * |
2286 | * This function checks that addr is a valid vmalloc'ed area, and | 2284 | * This function checks that addr is a valid vmalloc'ed area, and |
2287 | * that it is big enough to cover the vma. Will return failure if | 2285 | * that it is big enough to cover the vma. Will return failure if |
2288 | * that criteria isn't met. | 2286 | * that criteria isn't met. |
2289 | * | 2287 | * |
2290 | * Similar to remap_pfn_range() (see mm/memory.c) | 2288 | * Similar to remap_pfn_range() (see mm/memory.c) |
2291 | */ | 2289 | */ |
2292 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, | 2290 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, |
2293 | unsigned long pgoff) | 2291 | unsigned long pgoff) |
@@ -2319,18 +2317,18 @@ static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) | |||
2319 | } | 2317 | } |
2320 | 2318 | ||
2321 | /** | 2319 | /** |
2322 | * alloc_vm_area - allocate a range of kernel address space | 2320 | * alloc_vm_area - allocate a range of kernel address space |
2323 | * @size: size of the area | 2321 | * @size: size of the area |
2324 | * @ptes: returns the PTEs for the address space | 2322 | * @ptes: returns the PTEs for the address space |
2325 | * | 2323 | * |
2326 | * Returns: NULL on failure, vm_struct on success | 2324 | * Returns: NULL on failure, vm_struct on success |
2327 | * | 2325 | * |
2328 | * This function reserves a range of kernel address space, and | 2326 | * This function reserves a range of kernel address space, and |
2329 | * allocates pagetables to map that range. No actual mappings | 2327 | * allocates pagetables to map that range. No actual mappings |
2330 | * are created. | 2328 | * are created. |
2331 | * | 2329 | * |
2332 | * If @ptes is non-NULL, pointers to the PTEs (in init_mm) | 2330 | * If @ptes is non-NULL, pointers to the PTEs (in init_mm) |
2333 | * allocated for the VM area are returned. | 2331 | * allocated for the VM area are returned. |
2334 | */ | 2332 | */ |
2335 | struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) | 2333 | struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) |
2336 | { | 2334 | { |
@@ -2756,4 +2754,3 @@ static int __init proc_vmalloc_init(void) | |||
2756 | module_init(proc_vmalloc_init); | 2754 | module_init(proc_vmalloc_init); |
2757 | 2755 | ||
2758 | #endif | 2756 | #endif |
2759 | |||