Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c | 12 +++++++++---
1 file changed, 9 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5dac5d8cb148..85759cdd6973 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1446,9 +1446,6 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 	int zlc_active = 0;		/* set if using zonelist_cache */
 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
 
-	if (WARN_ON_ONCE(order >= MAX_ORDER))
-		return NULL;
-
 	classzone_idx = zone_idx(preferred_zone);
 zonelist_scan:
 	/*
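
The check removed above sat on the allocator fast path, so every allocation paid for it even though only out-of-range orders could ever trip it. The rationale spelled out in the comment added by the next hunk is that some callers legitimately probe decreasing orders until an allocation succeeds. Below is a minimal sketch of that caller pattern, assuming the standard alloc_pages() interface; the helper name alloc_largest_available() and its fallback policy are illustrative only and are not part of this patch.

#include <linux/gfp.h>
#include <linux/mm_types.h>

/*
 * Illustrative only: probe progressively smaller orders until an
 * allocation succeeds.  The starting order may be larger than the
 * allocator can ever satisfy; with this patch the sanity check lives
 * in the slow path, which warns once and returns NULL for such a
 * request, and the caller simply retries with a smaller order.
 */
static struct page *alloc_largest_available(gfp_t gfp_mask, unsigned int max_order)
{
	struct page *page;
	unsigned int order;

	for (order = max_order; ; order--) {
		page = alloc_pages(gfp_mask | __GFP_NOWARN, order);
		if (page || order == 0)
			return page;
	}
}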
@@ -1707,6 +1704,15 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct task_struct *p = current;
 
 	/*
+	 * In the slowpath, we sanity check order to avoid ever trying to
+	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
+	 * be using allocators in order of preference for an area that is
+	 * too large.
+	 */
+	if (WARN_ON_ONCE(order >= MAX_ORDER))
+		return NULL;
+
+	/*
 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
 	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
 	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
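
Taken together, the two hunks move the single order sanity check from the per-allocation fast path into __alloc_pages_slowpath(). A rough, hypothetical sketch of the resulting control flow follows; fastpath_alloc() and slowpath_alloc() stand in for get_page_from_freelist() and __alloc_pages_slowpath(), whose real parameter lists are longer, and the check is hoisted into the caller here only to keep the sketch to one function (in the patch itself it sits at the top of __alloc_pages_slowpath()).

#include <linux/bug.h>
#include <linux/gfp.h>

/* Hypothetical stand-ins for get_page_from_freelist() and
 * __alloc_pages_slowpath(); declarations only, to keep the sketch short. */
static struct page *fastpath_alloc(gfp_t gfp_mask, unsigned int order);
static struct page *slowpath_alloc(gfp_t gfp_mask, unsigned int order);

/*
 * Simplified control flow after this patch (not the literal kernel
 * code).  A request with order >= MAX_ORDER cannot be satisfied by the
 * fast path, since the buddy free lists only go up to MAX_ORDER - 1,
 * so it falls through to the slow path, which is now the only place
 * that pays for the sanity check.
 */
static struct page *allocate_sketch(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/* Fast path: hot, and no longer burdened by the order check. */
	page = fastpath_alloc(gfp_mask, order);
	if (page)
		return page;

	/* Slow path: warn once and fail cleanly for impossible orders. */
	if (WARN_ON_ONCE(order >= MAX_ORDER))
		return NULL;

	return slowpath_alloc(gfp_mask, order);
}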