Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	32
1 file changed, 20 insertions, 12 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a919ba5cb3c8..2ec9cc407216 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4061,17 +4061,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	int reserve_flags;
 
 	/*
-	 * In the slowpath, we sanity check order to avoid ever trying to
-	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
-	 * be using allocators in order of preference for an area that is
-	 * too large.
-	 */
-	if (order >= MAX_ORDER) {
-		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
-		return NULL;
-	}
-
-	/*
 	 * We also sanity check to catch abuse of atomic reserves being used by
 	 * callers that are not in atomic context.
 	 */
@@ -4364,6 +4353,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = { };
 
+	/*
+	 * There are several places where we assume that the order value is sane
+	 * so bail out early if the request is out of bound.
+	 */
+	if (unlikely(order >= MAX_ORDER)) {
+		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+		return NULL;
+	}
+
 	gfp_mask &= gfp_allowed_mask;
 	alloc_mask = gfp_mask;
 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
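The two hunks above are one logical change: the MAX_ORDER sanity check moves from __alloc_pages_slowpath() up into __alloc_pages_nodemask(), so an oversized request now bails out before the fast path is attempted. A minimal sketch of what a caller sees after this change (the helper, its fallback path, and the requested order are hypothetical, not part of the patch):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical caller that prefers a physically contiguous area. */
static void *grab_big_buffer(void)
{
	/*
	 * order >= MAX_ORDER now returns NULL right at the entry point;
	 * __GFP_NOWARN suppresses the WARN_ON_ONCE for callers that
	 * expect the request to be too large and have a fallback.
	 */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, MAX_ORDER);

	if (page)
		return page_address(page);

	/* Fall back to a virtually contiguous allocation. */
	return vmalloc(PAGE_SIZE << MAX_ORDER);
}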
@@ -5815,8 +5813,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
 					unsigned long size)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
+	int zone_idx = zone_idx(zone) + 1;
 
-	pgdat->nr_zones = zone_idx(zone) + 1;
+	if (zone_idx > pgdat->nr_zones)
+		pgdat->nr_zones = zone_idx;
 
 	zone->zone_start_pfn = zone_start_pfn;
 
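The init_currently_empty_zone() hunk makes pgdat->nr_zones monotonic: it is only ever raised, so initializing a lower zone after a higher one no longer hides zones that were already set up. A userspace model of the new update rule, with made-up zone indices, just to illustrate the behaviour:

#include <assert.h>

static int nr_zones;	/* stands in for pgdat->nr_zones */

/* Mirror of the new update rule: only ever raise the zone count. */
static void model_init_zone(int zone_idx_plus_one)
{
	if (zone_idx_plus_one > nr_zones)
		nr_zones = zone_idx_plus_one;
}

int main(void)
{
	model_init_zone(2);	/* e.g. ZONE_NORMAL set up first */
	model_init_zone(1);	/* e.g. a lower zone initialized later via hotplug */

	/* With the old unconditional assignment this would have dropped to 1. */
	assert(nr_zones == 2);
	return 0;
}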
@@ -7789,6 +7789,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 			goto unmovable;
 
 		/*
+		 * If the zone is movable and we have ruled out all reserved
+		 * pages then it should be reasonably safe to assume the rest
+		 * is movable.
+		 */
+		if (zone_idx(zone) == ZONE_MOVABLE)
+			continue;
+
+		/*
 		 * Hugepages are not in LRU lists, but they're movable.
 		 * We need not scan over tail pages bacause we don't
 		 * handle each tail page individually in migration.
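The last hunk short-circuits the per-page heuristics in has_unmovable_pages() for ZONE_MOVABLE: once reserved pages have been ruled out, the rest of the pageblock is assumed movable. A simplified model of the resulting per-page decision order (the helper name is made up; the real scan loop has several more cases after the fall-through):

/* Sketch only: the real code lives in the pageblock scan loop. */
static bool page_looks_unmovable(struct zone *zone, struct page *page)
{
	/* Reserved pages (bootmem, memory holes) are always unmovable. */
	if (PageReserved(page))
		return true;

	/* In ZONE_MOVABLE, everything that is not reserved is assumed movable. */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return false;

	/* ... otherwise fall through to the hugepage/LRU/refcount checks ... */
	return false;
}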