about · summary · refs · log · tree · commit · diff · stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  21
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 81e18ceef579..1741dd23e7c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -46,6 +46,7 @@
 #include <linux/stop_machine.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
+#include <xen/xen.h>
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
@@ -347,6 +348,9 @@ static inline bool update_defer_init(pg_data_t *pgdat,
 	/* Always populate low zones for address-constrained allocations */
 	if (zone_end < pgdat_end_pfn(pgdat))
 		return true;
+	/* Xen PV domains need page structures early */
+	if (xen_pv_domain())
+		return true;
 	(*nr_initialised)++;
 	if ((*nr_initialised > pgdat->static_init_pgcnt) &&
 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
@@ -1906,7 +1910,9 @@ static int move_freepages(struct zone *zone,
 	 * Remove at a later date when no bug reports exist related to
 	 * grouping pages by mobility
 	 */
-	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
+	VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
+		  pfn_valid(page_to_pfn(end_page)) &&
+		  page_zone(start_page) != page_zone(end_page));
 #endif
 
 	if (num_movable)
@@ -3590,7 +3596,7 @@ static bool __need_fs_reclaim(gfp_t gfp_mask)
 		return false;
 
 	/* this guy won't enter reclaim */
-	if ((current->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+	if (current->flags & PF_MEMALLOC)
 		return false;
 
 	/* We're only interested __GFP_FS allocations for now */
@@ -5350,17 +5356,8 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		if (context != MEMMAP_EARLY)
 			goto not_early;
 
-		if (!early_pfn_valid(pfn)) {
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-			/*
-			 * Skip to the pfn preceding the next valid one (or
-			 * end_pfn), such that we hit a valid pfn (or end_pfn)
-			 * on our next iteration of the loop.
-			 */
-			pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
-#endif
+		if (!early_pfn_valid(pfn))
 			continue;
-		}
 		if (!early_pfn_in_nid(pfn, nid))
 			continue;
 		if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))