author     Vlastimil Babka <vbabka@suse.cz>                2015-09-08 18:03:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-08 18:35:28 -0400
commit     96db800f5d73cd5c49461253d45766e094f0f8c2 (patch)
tree       7f6ea4c232c04196cedf34d8b35cce641228daa6
parent     7fadc820222497eac234d1d51a66517c00a6ca4c (diff)
mm: rename alloc_pages_exact_node() to __alloc_pages_node()
alloc_pages_exact_node() was introduced in commit 6484eb3e2a81 ("page
allocator: do not check NUMA node ID when the caller knows the node is
valid") as an optimized variant of alloc_pages_node() that doesn't fall
back to the current node for nid == NUMA_NO_NODE.  Unfortunately the name
of the function can easily suggest that the allocation is restricted to
the given node and fails otherwise.  In truth, the node is only preferred,
unless __GFP_THISNODE is passed among the gfp flags.

The misleading name has led to mistakes in the past, see for example
commits 5265047ac301 ("mm, thp: really limit transparent hugepage
allocation to local node") and b360edb43f8e ("mm, mempolicy:
migrate_to_node should only migrate to node").

Another issue with the name is that there's a family of
alloc_pages_exact*() functions where 'exact' means exact size (instead of
page order), which leads to more confusion.

To prevent further mistakes, this patch effectively renames
alloc_pages_exact_node() to __alloc_pages_node() to better convey that
it's an optimized variant of alloc_pages_node() not intended for general
usage.  Both functions get described in comments.

It has also been considered to really provide a convenience function for
allocations restricted to a node, but the prevailing opinion seems to be
that __GFP_THISNODE already provides that functionality and we shouldn't
duplicate the API needlessly.  The number of users would be small anyway.

Existing callers of alloc_pages_exact_node() are simply converted to call
__alloc_pages_node(), with the exception of sba_alloc_coherent(), which
open-codes the check for NUMA_NO_NODE and is therefore converted to use
alloc_pages_node() instead.  This means it no longer performs some
VM_BUG_ON checks, and since the current check for nid in
alloc_pages_node() uses a 'nid < 0' comparison (which includes
NUMA_NO_NODE), it may hide wrong values which would previously be
exposed.  Both differences will be rectified by the next patch.

To sum up, this patch makes no functional changes, except temporarily
hiding potentially buggy callers.  Restricting the checks in
alloc_pages_node() is left for the next patch, which can in turn expose
more existing buggy callers.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Robin Holt <robinmholt@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Cliff Whickman <cpw@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
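[Editor's illustration, not part of this patch: a minimal hypothetical caller sketching the semantics the rename is meant to make obvious. The node passed to __alloc_pages_node() is only a preference; adding __GFP_THISNODE is what actually restricts the allocation to that node, and callers that may pass NUMA_NO_NODE should keep using alloc_pages_node(), which resolves the unknown node to the current one.]

    #include <linux/gfp.h>
    #include <linux/types.h>

    /* Hypothetical helper, for illustration only (not part of this patch). */
    static struct page *example_alloc_on_node(int nid, unsigned int order,
                                              bool strict)
    {
            gfp_t gfp = GFP_KERNEL;

            /*
             * Without __GFP_THISNODE, nid is merely the preferred node and
             * the allocator may fall back to other nodes.
             */
            if (strict)
                    gfp |= __GFP_THISNODE; /* fail rather than fall back */

            /*
             * nid must be a valid, online node here; callers that may pass
             * NUMA_NO_NODE should use alloc_pages_node() instead.
             */
            return __alloc_pages_node(nid, gfp, order);
    }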
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c      6
-rw-r--r--  arch/ia64/kernel/uncached.c          2
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c           2
-rw-r--r--  arch/powerpc/platforms/cell/ras.c    2
-rw-r--r--  arch/x86/kvm/vmx.c                   2
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c         2
-rw-r--r--  include/linux/gfp.h                 23
-rw-r--r--  kernel/profile.c                     8
-rw-r--r--  mm/filemap.c                         2
-rw-r--r--  mm/huge_memory.c                     2
-rw-r--r--  mm/hugetlb.c                         4
-rw-r--r--  mm/memory-failure.c                  2
-rw-r--r--  mm/mempolicy.c                       4
-rw-r--r--  mm/migrate.c                         4
-rw-r--r--  mm/page_alloc.c                      2
-rw-r--r--  mm/slab.c                            2
-rw-r--r--  mm/slob.c                            4
-rw-r--r--  mm/slub.c                            2
18 files changed, 38 insertions, 37 deletions
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 344387a55406..a6d6190c9d24 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1140,13 +1140,9 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
 #ifdef CONFIG_NUMA
 	{
-		int node = ioc->node;
 		struct page *page;
 
-		if (node == NUMA_NO_NODE)
-			node = numa_node_id();
-
-		page = alloc_pages_exact_node(node, flags, get_order(size));
+		page = alloc_pages_node(ioc->node, flags, get_order(size));
 		if (unlikely(!page))
 			return NULL;
 
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 20e8a9b21d75..f3976da36721 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -97,7 +97,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 
 	/* attempt to allocate a granule's worth of cached memory pages */
 
-	page = alloc_pages_exact_node(nid,
+	page = __alloc_pages_node(nid,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
 	if (!page) {
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index d0853e8e8623..8f59907007cb 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -92,7 +92,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 	 */
 	node = pcibus_to_node(pdev->bus);
 	if (likely(node >=0)) {
-		struct page *p = alloc_pages_exact_node(node,
+		struct page *p = __alloc_pages_node(node,
 						flags, get_order(size));
 
 		if (likely(p))
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index e865d748179b..2d4f60c0119a 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -123,7 +123,7 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
 
 	area->nid = nid;
 	area->order = order;
-	area->pages = alloc_pages_exact_node(area->nid,
+	area->pages = __alloc_pages_node(area->nid,
 						GFP_KERNEL|__GFP_THISNODE,
 						area->order);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4a4eec30cc08..148ea2016022 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3150,7 +3150,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
 	struct page *pages;
 	struct vmcs *vmcs;
 
-	pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
+	pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
 	if (!pages)
 		return NULL;
 	vmcs = page_address(pages);
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 95c894482fdd..340b44d9e8cf 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -239,7 +239,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
 	mq->mmr_blade = uv_cpu_to_blade_id(cpu);
 
 	nid = cpu_to_node(cpu);
-	page = alloc_pages_exact_node(nid,
+	page = __alloc_pages_node(nid,
 				      GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				      pg_order);
 	if (page == NULL) {
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3bd64b115999..d2c142bc872e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -303,20 +303,28 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
 	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
 }
 
-static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
-						unsigned int order)
+/*
+ * Allocate pages, preferring the node given as nid. The node must be valid and
+ * online. For more general interface, see alloc_pages_node().
+ */
+static inline struct page *
+__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 {
-	/* Unknown node is current node */
-	if (nid < 0)
-		nid = numa_node_id();
+	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
 
 	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
-static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+/*
+ * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
+ * prefer the current CPU's node.
+ */
+static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 						unsigned int order)
 {
-	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
+	/* Unknown node is current node */
+	if (nid < 0)
+		nid = numa_node_id();
 
 	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
@@ -357,7 +365,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
 void free_pages_exact(void *virt, size_t size);
-/* This is different from alloc_pages_exact_node !!! */
 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 
 #define __get_free_page(gfp_mask) \
diff --git a/kernel/profile.c b/kernel/profile.c
index a7bcd28d6e9f..99513e1160e5 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -339,7 +339,7 @@ static int profile_cpu_callback(struct notifier_block *info,
 		node = cpu_to_mem(cpu);
 		per_cpu(cpu_profile_flip, cpu) = 0;
 		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = alloc_pages_exact_node(node,
+			page = __alloc_pages_node(node,
 					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
@@ -347,7 +347,7 @@ static int profile_cpu_callback(struct notifier_block *info,
 			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
 		}
 		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = alloc_pages_exact_node(node,
+			page = __alloc_pages_node(node,
 					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
@@ -543,14 +543,14 @@ static int create_hash_tables(void)
 		int node = cpu_to_mem(cpu);
 		struct page *page;
 
-		page = alloc_pages_exact_node(node,
+		page = __alloc_pages_node(node,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
 		per_cpu(cpu_profile_hits, cpu)[1]
 				= (struct profile_hit *)page_address(page);
-		page = alloc_pages_exact_node(node,
+		page = __alloc_pages_node(node,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
diff --git a/mm/filemap.c b/mm/filemap.c
index 30d69c0c5a38..72940fb38666 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -674,7 +674,7 @@ struct page *__page_cache_alloc(gfp_t gfp)
 	do {
 		cpuset_mems_cookie = read_mems_allowed_begin();
 		n = cpuset_mem_spread_node();
-		page = alloc_pages_exact_node(n, gfp, 0);
+		page = __alloc_pages_node(n, gfp, 0);
 	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 
 	return page;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 71a4822c832b..883f613ada7e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2414,7 +2414,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
 	 */
 	up_read(&mm->mmap_sem);
 
-	*hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
+	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cd1280c487ff..999fb0aef8f1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1331,7 +1331,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
 	struct page *page;
 
-	page = alloc_pages_exact_node(nid,
+	page = __alloc_pages_node(nid,
 		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
 						__GFP_REPEAT|__GFP_NOWARN,
 		huge_page_order(h));
@@ -1483,7 +1483,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 			__GFP_REPEAT|__GFP_NOWARN,
 			huge_page_order(h));
 	else
-		page = alloc_pages_exact_node(nid,
+		page = __alloc_pages_node(nid,
 			htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
 			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index bba2d7c2c9ce..eeda6485e76c 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1521,7 +1521,7 @@ static struct page *new_page(struct page *p, unsigned long private, int **x)
 		return alloc_huge_page_node(page_hstate(compound_head(p)),
 						   nid);
 	else
-		return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+		return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
 }
 
 /*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d6f2caee28c0..87a177917cb2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -942,7 +942,7 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
 					node);
 	else
-		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE |
+		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
 						    __GFP_THISNODE, 0);
 }
 
@@ -1998,7 +1998,7 @@ retry_cpuset:
 		nmask = policy_nodemask(gfp, pol);
 		if (!nmask || node_isset(hpage_node, *nmask)) {
 			mpol_cond_put(pol);
-			page = alloc_pages_exact_node(hpage_node,
+			page = __alloc_pages_node(hpage_node,
 						gfp | __GFP_THISNODE, order);
 			goto out;
 		}
diff --git a/mm/migrate.c b/mm/migrate.c
index 918defbdda0e..02ce25df16c2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1195,7 +1195,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 		return alloc_huge_page_node(page_hstate(compound_head(p)),
 					pm->node);
 	else
-		return alloc_pages_exact_node(pm->node,
+		return __alloc_pages_node(pm->node,
 				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
 }
 
@@ -1555,7 +1555,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 	int nid = (int) data;
 	struct page *newpage;
 
-	newpage = alloc_pages_exact_node(nid,
+	newpage = __alloc_pages_node(nid,
 				  (GFP_HIGHUSER_MOVABLE |
 				   __GFP_THISNODE | __GFP_NOMEMALLOC |
 				   __GFP_NORETRY | __GFP_NOWARN) &
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 252665d553b4..bdaa0cf8fd41 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3511,8 +3511,6 @@ EXPORT_SYMBOL(alloc_pages_exact);
  *
  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
  * back.
- * Note this is not alloc_pages_exact_node() which allocates on a specific node,
- * but is not exact.
  */
 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
 {
diff --git a/mm/slab.c b/mm/slab.c
index 60c936938b84..c77ebe6cc87c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1595,7 +1595,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	if (memcg_charge_slab(cachep, flags, cachep->gfporder))
 		return NULL;
 
-	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
+	page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page) {
 		memcg_uncharge_slab(cachep, cachep->gfporder);
 		slab_out_of_memory(cachep, flags, nodeid);
diff --git a/mm/slob.c b/mm/slob.c
index 165bbd3cd606..0d7e5df74d1f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -45,7 +45,7 @@
  * NUMA support in SLOB is fairly simplistic, pushing most of the real
  * logic down to the page allocator, and simply doing the node accounting
  * on the upper levels. In the event that a node id is explicitly
- * provided, alloc_pages_exact_node() with the specified node id is used
+ * provided, __alloc_pages_node() with the specified node id is used
  * instead. The common case (or when the node id isn't explicitly provided)
  * will default to the current node, as per numa_node_id().
  *
@@ -193,7 +193,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 #ifdef CONFIG_NUMA
 	if (node != NUMA_NO_NODE)
-		page = alloc_pages_exact_node(node, gfp, order);
+		page = __alloc_pages_node(node, gfp, order);
 	else
 #endif
 	page = alloc_pages(gfp, order);
diff --git a/mm/slub.c b/mm/slub.c
index 084184e706c6..f614b5dc396b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1334,7 +1334,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 	if (node == NUMA_NO_NODE)
 		page = alloc_pages(flags, order);
 	else
-		page = alloc_pages_exact_node(node, flags, order);
+		page = __alloc_pages_node(node, flags, order);
 
 	if (!page)
 		memcg_uncharge_slab(s, order);