 mm/huge_memory.c |  4
 mm/mempolicy.c   | 10
 mm/page_alloc.c  |  2
 mm/vmalloc.c     | 33
 4 files changed, 26 insertions(+), 23 deletions(-)
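The series is a mechanical substitution: every hard-coded -1 that means "no NUMA node preference" becomes the named constant NUMA_NO_NODE (defined as (-1) in include/linux/numa.h), so behaviour is unchanged and only readability improves. As a minimal, self-contained sketch of the idiom, compilable in userspace; the pick_node() helper is hypothetical and the constant is re-declared here only so the example stands alone:

/*
 * Sketch of the "named constant instead of magic -1" idiom adopted by
 * this series.  In the kernel the constant comes from <linux/numa.h>.
 */
#include <stdio.h>

#define NUMA_NO_NODE	(-1)	/* matches the kernel's definition */

/* Hypothetical helper: honour a requested node, or fall back. */
static int pick_node(int requested)
{
	if (requested == NUMA_NO_NODE)
		return 0;	/* pretend node 0 is the local node */
	return requested;
}

int main(void)
{
	/* Before the change, callers passed a bare -1 here. */
	printf("node = %d\n", pick_node(NUMA_NO_NODE));
	return 0;
}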
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6049376c7226..bfa142e67b1c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2376,7 +2376,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 	struct page *page;
 	unsigned long _address;
 	spinlock_t *ptl;
-	int node = -1;
+	int node = NUMA_NO_NODE;
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -2406,7 +2406,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		 * be more sophisticated and look at more pages,
 		 * but isn't for now.
 		 */
-		if (node == -1)
+		if (node == NUMA_NO_NODE)
 			node = page_to_nid(page);
 		VM_BUG_ON(PageCompound(page));
 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e2661d1c5c33..31d26637b658 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -26,7 +26,7 @@
  * the allocation to memory nodes instead
  *
  * preferred      Try a specific node first before normal fallback.
- *                As a special case node -1 here means do the allocation
+ *                As a special case NUMA_NO_NODE here means do the allocation
  *                on the local CPU. This is normally identical to default,
  *                but useful to set in a VMA when you have a non default
  *                process policy.
@@ -127,7 +127,7 @@ static struct mempolicy *get_task_policy(struct task_struct *p)
 
 	if (!pol) {
 		node = numa_node_id();
-		if (node != -1)
+		if (node != NUMA_NO_NODE)
 			pol = &preferred_node_policy[node];
 
 		/* preferred_node_policy is not initialised early in boot */
@@ -258,7 +258,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 	struct mempolicy *policy;
 
 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
-		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
+		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
 
 	if (mode == MPOL_DEFAULT) {
 		if (nodes && !nodes_empty(*nodes))
@@ -1221,7 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
 		 start, start + len, mode, mode_flags,
-		 nmask ? nodes_addr(*nmask)[0] : -1);
+		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
 
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 
@@ -2488,7 +2488,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
 		 vma->vm_pgoff,
 		 sz, npol ? npol->mode : -1,
 		 npol ? npol->flags : -1,
-		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
+		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
 
 	if (npol) {
 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a40b2f1cac2f..159f81577774 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3261,7 +3261,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 {
 	int n, val;
 	int min_val = INT_MAX;
-	int best_node = -1;
+	int best_node = NUMA_NO_NODE;
 	const struct cpumask *tmp = cpumask_of_node(0);
 
 	/* Use the local node if we haven't already */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5123a169ab7b..0f751f2068c3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1376,8 +1376,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
-	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
-				  __builtin_return_address(0));
+	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
+				  GFP_KERNEL, __builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
@@ -1385,8 +1385,8 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 				       unsigned long start, unsigned long end,
 				       const void *caller)
 {
-	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
-				  caller);
+	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
+				  GFP_KERNEL, caller);
 }
 
 /**
@@ -1401,14 +1401,15 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-				  -1, GFP_KERNEL, __builtin_return_address(0));
+				  NUMA_NO_NODE, GFP_KERNEL,
+				  __builtin_return_address(0));
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				const void *caller)
 {
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-				  -1, GFP_KERNEL, caller);
+				  NUMA_NO_NODE, GFP_KERNEL, caller);
 }
 
 /**
@@ -1650,7 +1651,7 @@ fail:
  * @end: vm area range end
  * @gfp_mask: flags for the page level allocator
  * @prot: protection mask for the allocated pages
- * @node: node to use for allocation or -1
+ * @node: node to use for allocation or NUMA_NO_NODE
  * @caller: caller's return address
  *
  * Allocate enough pages to cover @size from the page level
@@ -1706,7 +1707,7 @@ fail:
  * @align: desired alignment
  * @gfp_mask: flags for the page level allocator
  * @prot: protection mask for the allocated pages
- * @node: node to use for allocation or -1
+ * @node: node to use for allocation or NUMA_NO_NODE
  * @caller: caller's return address
  *
  * Allocate enough pages to cover @size from the page level
@@ -1723,7 +1724,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
+	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -1746,7 +1747,8 @@ static inline void *__vmalloc_node_flags(unsigned long size,
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
+	return __vmalloc_node_flags(size, NUMA_NO_NODE,
+				    GFP_KERNEL | __GFP_HIGHMEM);
 }
 EXPORT_SYMBOL(vmalloc);
 
@@ -1762,7 +1764,7 @@ EXPORT_SYMBOL(vmalloc);
  */
 void *vzalloc(unsigned long size)
 {
-	return __vmalloc_node_flags(size, -1,
+	return __vmalloc_node_flags(size, NUMA_NO_NODE,
 				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
 }
 EXPORT_SYMBOL(vzalloc);
@@ -1781,7 +1783,8 @@ void *vmalloc_user(unsigned long size)
 
 	ret = __vmalloc_node(size, SHMLBA,
 			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-			     PAGE_KERNEL, -1, __builtin_return_address(0));
+			     PAGE_KERNEL, NUMA_NO_NODE,
+			     __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
 		area->flags |= VM_USERMAP;
@@ -1846,7 +1849,7 @@ EXPORT_SYMBOL(vzalloc_node);
 void *vmalloc_exec(unsigned long size)
 {
 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-			      -1, __builtin_return_address(0));
+			      NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
@@ -1867,7 +1870,7 @@ void *vmalloc_exec(unsigned long size)
 void *vmalloc_32(unsigned long size)
 {
 	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
-			      -1, __builtin_return_address(0));
+			      NUMA_NO_NODE, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
 
@@ -1884,7 +1887,7 @@ void *vmalloc_32_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
-			     -1, __builtin_return_address(0));
+			     NUMA_NO_NODE, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
 		area->flags |= VM_USERMAP;