diff options
author | David Rientjes <rientjes@google.com> | 2013-02-22 19:35:36 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-23 20:50:21 -0500 |
commit | 00ef2d2f84babb9b209f0fc003bc490c6bf1e6ef (patch) | |
tree | 9377f30bced99b458f7466ec1719ca284b4b2003 /mm/vmalloc.c | |
parent | 751efd8610d3d7d67b7bdf7f62646edea7365dd7 (diff) |
mm: use NUMA_NO_NODE
Make a sweep through mm/ and convert code that uses -1 directly to using
the more appropriate NUMA_NO_NODE.
Signed-off-by: David Rientjes <rientjes@google.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r-- | mm/vmalloc.c | 33 |
1 file changed, 18 insertions, 15 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 5123a169ab7b..0f751f2068c3 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1376,8 +1376,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, | |||
1376 | struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, | 1376 | struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, |
1377 | unsigned long start, unsigned long end) | 1377 | unsigned long start, unsigned long end) |
1378 | { | 1378 | { |
1379 | return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, | 1379 | return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, |
1380 | __builtin_return_address(0)); | 1380 | GFP_KERNEL, __builtin_return_address(0)); |
1381 | } | 1381 | } |
1382 | EXPORT_SYMBOL_GPL(__get_vm_area); | 1382 | EXPORT_SYMBOL_GPL(__get_vm_area); |
1383 | 1383 | ||
@@ -1385,8 +1385,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, | |||
1385 | unsigned long start, unsigned long end, | 1385 | unsigned long start, unsigned long end, |
1386 | const void *caller) | 1386 | const void *caller) |
1387 | { | 1387 | { |
1388 | return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, | 1388 | return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, |
1389 | caller); | 1389 | GFP_KERNEL, caller); |
1390 | } | 1390 | } |
1391 | 1391 | ||
1392 | /** | 1392 | /** |
@@ -1401,14 +1401,15 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, | |||
1401 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) | 1401 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) |
1402 | { | 1402 | { |
1403 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, | 1403 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, |
1404 | -1, GFP_KERNEL, __builtin_return_address(0)); | 1404 | NUMA_NO_NODE, GFP_KERNEL, |
1405 | __builtin_return_address(0)); | ||
1405 | } | 1406 | } |
1406 | 1407 | ||
1407 | struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, | 1408 | struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, |
1408 | const void *caller) | 1409 | const void *caller) |
1409 | { | 1410 | { |
1410 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, | 1411 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, |
1411 | -1, GFP_KERNEL, caller); | 1412 | NUMA_NO_NODE, GFP_KERNEL, caller); |
1412 | } | 1413 | } |
1413 | 1414 | ||
1414 | /** | 1415 | /** |
@@ -1650,7 +1651,7 @@ fail: | |||
1650 | * @end: vm area range end | 1651 | * @end: vm area range end |
1651 | * @gfp_mask: flags for the page level allocator | 1652 | * @gfp_mask: flags for the page level allocator |
1652 | * @prot: protection mask for the allocated pages | 1653 | * @prot: protection mask for the allocated pages |
1653 | * @node: node to use for allocation or -1 | 1654 | * @node: node to use for allocation or NUMA_NO_NODE |
1654 | * @caller: caller's return address | 1655 | * @caller: caller's return address |
1655 | * | 1656 | * |
1656 | * Allocate enough pages to cover @size from the page level | 1657 | * Allocate enough pages to cover @size from the page level |
@@ -1706,7 +1707,7 @@ fail: | |||
1706 | * @align: desired alignment | 1707 | * @align: desired alignment |
1707 | * @gfp_mask: flags for the page level allocator | 1708 | * @gfp_mask: flags for the page level allocator |
1708 | * @prot: protection mask for the allocated pages | 1709 | * @prot: protection mask for the allocated pages |
1709 | * @node: node to use for allocation or -1 | 1710 | * @node: node to use for allocation or NUMA_NO_NODE |
1710 | * @caller: caller's return address | 1711 | * @caller: caller's return address |
1711 | * | 1712 | * |
1712 | * Allocate enough pages to cover @size from the page level | 1713 | * Allocate enough pages to cover @size from the page level |
@@ -1723,7 +1724,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align, | |||
1723 | 1724 | ||
1724 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) | 1725 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) |
1725 | { | 1726 | { |
1726 | return __vmalloc_node(size, 1, gfp_mask, prot, -1, | 1727 | return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, |
1727 | __builtin_return_address(0)); | 1728 | __builtin_return_address(0)); |
1728 | } | 1729 | } |
1729 | EXPORT_SYMBOL(__vmalloc); | 1730 | EXPORT_SYMBOL(__vmalloc); |
@@ -1746,7 +1747,8 @@ static inline void *__vmalloc_node_flags(unsigned long size, | |||
1746 | */ | 1747 | */ |
1747 | void *vmalloc(unsigned long size) | 1748 | void *vmalloc(unsigned long size) |
1748 | { | 1749 | { |
1749 | return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM); | 1750 | return __vmalloc_node_flags(size, NUMA_NO_NODE, |
1751 | GFP_KERNEL | __GFP_HIGHMEM); | ||
1750 | } | 1752 | } |
1751 | EXPORT_SYMBOL(vmalloc); | 1753 | EXPORT_SYMBOL(vmalloc); |
1752 | 1754 | ||
@@ -1762,7 +1764,7 @@ EXPORT_SYMBOL(vmalloc); | |||
1762 | */ | 1764 | */ |
1763 | void *vzalloc(unsigned long size) | 1765 | void *vzalloc(unsigned long size) |
1764 | { | 1766 | { |
1765 | return __vmalloc_node_flags(size, -1, | 1767 | return __vmalloc_node_flags(size, NUMA_NO_NODE, |
1766 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); | 1768 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); |
1767 | } | 1769 | } |
1768 | EXPORT_SYMBOL(vzalloc); | 1770 | EXPORT_SYMBOL(vzalloc); |
@@ -1781,7 +1783,8 @@ void *vmalloc_user(unsigned long size) | |||
1781 | 1783 | ||
1782 | ret = __vmalloc_node(size, SHMLBA, | 1784 | ret = __vmalloc_node(size, SHMLBA, |
1783 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | 1785 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, |
1784 | PAGE_KERNEL, -1, __builtin_return_address(0)); | 1786 | PAGE_KERNEL, NUMA_NO_NODE, |
1787 | __builtin_return_address(0)); | ||
1785 | if (ret) { | 1788 | if (ret) { |
1786 | area = find_vm_area(ret); | 1789 | area = find_vm_area(ret); |
1787 | area->flags |= VM_USERMAP; | 1790 | area->flags |= VM_USERMAP; |
@@ -1846,7 +1849,7 @@ EXPORT_SYMBOL(vzalloc_node); | |||
1846 | void *vmalloc_exec(unsigned long size) | 1849 | void *vmalloc_exec(unsigned long size) |
1847 | { | 1850 | { |
1848 | return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, | 1851 | return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, |
1849 | -1, __builtin_return_address(0)); | 1852 | NUMA_NO_NODE, __builtin_return_address(0)); |
1850 | } | 1853 | } |
1851 | 1854 | ||
1852 | #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) | 1855 | #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) |
@@ -1867,7 +1870,7 @@ void *vmalloc_exec(unsigned long size) | |||
1867 | void *vmalloc_32(unsigned long size) | 1870 | void *vmalloc_32(unsigned long size) |
1868 | { | 1871 | { |
1869 | return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, | 1872 | return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, |
1870 | -1, __builtin_return_address(0)); | 1873 | NUMA_NO_NODE, __builtin_return_address(0)); |
1871 | } | 1874 | } |
1872 | EXPORT_SYMBOL(vmalloc_32); | 1875 | EXPORT_SYMBOL(vmalloc_32); |
1873 | 1876 | ||
@@ -1884,7 +1887,7 @@ void *vmalloc_32_user(unsigned long size) | |||
1884 | void *ret; | 1887 | void *ret; |
1885 | 1888 | ||
1886 | ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, | 1889 | ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, |
1887 | -1, __builtin_return_address(0)); | 1890 | NUMA_NO_NODE, __builtin_return_address(0)); |
1888 | if (ret) { | 1891 | if (ret) { |
1889 | area = find_vm_area(ret); | 1892 | area = find_vm_area(ret); |
1890 | area->flags |= VM_USERMAP; | 1893 | area->flags |= VM_USERMAP; |