author	Vlastimil Babka <vbabka@suse.cz>	2015-09-08 18:03:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-08 18:35:28 -0400
commit	0bc35a970c01c50e3bcc4b5a612787346024e5db (patch)
tree	54724a480e193cd3edc795797fc73fdc677ab72d /include/linux
parent	96db800f5d73cd5c49461253d45766e094f0f8c2 (diff)
mm: unify checks in alloc_pages_node() and __alloc_pages_node()
Perform the same debug checks in alloc_pages_node() as are done in
__alloc_pages_node(), by making the former function a wrapper of the
latter one.

In addition to better diagnostics in DEBUG_VM builds for situations
which have already been fatal (e.g. an out-of-bounds node id), there are
two visible changes for potential existing buggy callers of
alloc_pages_node():

- Calling alloc_pages_node() with any negative nid (e.g. due to
  arithmetic overflow) was treated as passing NUMA_NO_NODE, and
  fallback to the local node was applied. This will now be fatal.

- Calling alloc_pages_node() with an offline node will now be checked
  in DEBUG_VM builds. Since it's not fatal if the node has been
  previously online, and this patch may expose some existing buggy
  callers, the VM_BUG_ON in __alloc_pages_node() is changed to a
  VM_WARN_ON.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
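For illustration, a caller-side sketch of how the two visible changes
land on a user of alloc_pages_node(). This is not part of the patch:
get_buffer_page() and its fallback policy are invented here purely to
show the new expectations on nid.

/*
 * Hypothetical caller (illustrative only, not from this patch).
 */
#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/numa.h>
#include <linux/nodemask.h>

static struct page *get_buffer_page(int nid, unsigned int order)
{
	/*
	 * Change 1: a negative nid other than NUMA_NO_NODE (e.g. from
	 * arithmetic overflow) used to silently fall back to the local
	 * node; it now trips VM_BUG_ON() in DEBUG_VM builds, so the
	 * caller must validate it instead of relying on the old fallback.
	 */
	if (WARN_ON_ONCE(nid != NUMA_NO_NODE &&
			 (nid < 0 || nid >= MAX_NUMNODES)))
		nid = NUMA_NO_NODE;

	/*
	 * Change 2: a valid but offline nid now triggers VM_WARN_ON()
	 * in DEBUG_VM builds; prefer the local node explicitly instead.
	 */
	if (nid != NUMA_NO_NODE && !node_online(nid))
		nid = NUMA_NO_NODE;

	return alloc_pages_node(nid, GFP_KERNEL, order);
}

Since alloc_pages_node() is now a wrapper of __alloc_pages_node(), both
entry points apply the same diagnostics, so callers no longer see
silently differing behavior between the two.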
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/gfp.h	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index d2c142bc872e..4a12cae2fb0c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -310,23 +310,23 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
 static inline struct page *
 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 {
-	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
+	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+	VM_WARN_ON(!node_online(nid));
 
 	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
 /*
  * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
- * prefer the current CPU's node.
+ * prefer the current CPU's node. Otherwise node must be valid and online.
  */
 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 						unsigned int order)
 {
-	/* Unknown node is current node */
-	if (nid < 0)
+	if (nid == NUMA_NO_NODE)
 		nid = numa_node_id();
 
-	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+	return __alloc_pages_node(nid, gfp_mask, order);
 }
 
 #ifdef CONFIG_NUMA