Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  28
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a919ba5cb3c8..6847177dc4a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4061,17 +4061,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	int reserve_flags;
 
 	/*
-	 * In the slowpath, we sanity check order to avoid ever trying to
-	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
-	 * be using allocators in order of preference for an area that is
-	 * too large.
-	 */
-	if (order >= MAX_ORDER) {
-		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
-		return NULL;
-	}
-
-	/*
 	 * We also sanity check to catch abuse of atomic reserves being used by
 	 * callers that are not in atomic context.
 	 */
@@ -4364,6 +4353,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = { };
 
+	/*
+	 * There are several places where we assume that the order value is sane
+	 * so bail out early if the request is out of bound.
+	 */
+	if (unlikely(order >= MAX_ORDER)) {
+		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+		return NULL;
+	}
+
 	gfp_mask &= gfp_allowed_mask;
 	alloc_mask = gfp_mask;
 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
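The relocated check keeps the caller-visible contract of the old slowpath code, only earlier: a request with order >= MAX_ORDER fails with NULL before any zonelist work, and warns once unless the caller passed __GFP_NOWARN. The following is a minimal user-space sketch of that contract; the MAX_ORDER and __GFP_NOWARN values and the warn_once()/alloc_pages_checked() helpers are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel constants used by the check. */
#define MAX_ORDER    11          /* largest buddy block is order MAX_ORDER - 1 */
#define __GFP_NOWARN 0x200u      /* caller opts out of the warning */

typedef unsigned int gfp_t;

/* Warn only on the first offending call, like WARN_ON_ONCE(). */
static void warn_once(const char *msg)
{
	static bool warned;

	if (!warned) {
		warned = true;
		fprintf(stderr, "WARNING: %s\n", msg);
	}
}

/*
 * Sketch of the bail-out the patch moves into __alloc_pages_nodemask():
 * out-of-bound orders are rejected before any real allocation work.
 */
static void *alloc_pages_checked(gfp_t gfp_mask, unsigned int order)
{
	if (order >= MAX_ORDER) {
		if (!(gfp_mask & __GFP_NOWARN))
			warn_once("page allocation order out of bounds");
		return NULL;
	}

	/* A real allocator would walk the zonelists here; malloc() is a stub. */
	return malloc((size_t)4096 << order);
}

int main(void)
{
	void *ok = alloc_pages_checked(0, 3);                        /* valid order */
	void *bad = alloc_pages_checked(0, MAX_ORDER);               /* warns, NULL */
	void *quiet = alloc_pages_checked(__GFP_NOWARN, MAX_ORDER);  /* silent NULL */

	printf("ok=%p bad=%p quiet=%p\n", ok, bad, quiet);
	free(ok);
	return 0;
}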
@@ -7789,6 +7787,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 			goto unmovable;
 
 		/*
+		 * If the zone is movable and we have ruled out all reserved
+		 * pages then it should be reasonably safe to assume the rest
+		 * is movable.
+		 */
+		if (zone_idx(zone) == ZONE_MOVABLE)
+			continue;
+
+		/*
 		 * Hugepages are not in LRU lists, but they're movable.
 		 * We need not scan over tail pages bacause we don't
 		 * handle each tail page individually in migration.
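The new shortcut changes the per-page scan in has_unmovable_pages(): once a page in a ZONE_MOVABLE zone has been ruled out as reserved, the remaining movability heuristics (hugepage, LRU and refcount checks) are skipped for it. The following simplified model shows only that control flow; the toy_page structure, the zone enum and toy_has_unmovable_pages() are illustrative, not the kernel's data structures.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative zone indexes; only ZONE_MOVABLE matters for the shortcut. */
enum zone_idx { ZONE_NORMAL, ZONE_MOVABLE };

/* Toy page descriptor standing in for struct page. */
struct toy_page {
	bool reserved;   /* e.g. firmware/bootmem pages: never movable */
	bool huge;       /* hugepages are movable even though they are off-LRU */
	bool on_lru;     /* regular LRU pages are movable */
};

/*
 * Simplified model of the scan: report whether any page in the range
 * blocks isolation/offlining, mirroring the patched order of checks.
 */
static bool toy_has_unmovable_pages(enum zone_idx zone,
				    const struct toy_page *pages, int count)
{
	for (int i = 0; i < count; i++) {
		const struct toy_page *page = &pages[i];

		if (page->reserved)
			return true;            /* "goto unmovable" in the kernel */

		/*
		 * The patch's shortcut: in a movable zone, anything that is
		 * not reserved is assumed movable, so skip the remaining
		 * heuristics for this page.
		 */
		if (zone == ZONE_MOVABLE)
			continue;

		if (page->huge)
			continue;               /* movable via hugepage migration */

		if (!page->on_lru)
			return true;            /* unknown, treat as unmovable */
	}

	return false;
}

int main(void)
{
	struct toy_page range[] = {
		{ .reserved = false, .huge = false, .on_lru = false },
	};

	/* Off-LRU, non-huge page: unmovable in ZONE_NORMAL, tolerated in ZONE_MOVABLE. */
	printf("normal:  %d\n", toy_has_unmovable_pages(ZONE_NORMAL, range, 1));
	printf("movable: %d\n", toy_has_unmovable_pages(ZONE_MOVABLE, range, 1));
	return 0;
}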