Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	54
1 file changed, 23 insertions(+), 31 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c988d324e3f6..26246fdf45b5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5005,15 +5005,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 		/*
-		 * If not mirrored_kernelcore and ZONE_MOVABLE exists, range
-		 * from zone_movable_pfn[nid] to end of each node should be
-		 * ZONE_MOVABLE not ZONE_NORMAL. skip it.
-		 */
-		if (!mirrored_kernelcore && zone_movable_pfn[nid])
-			if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
-				continue;
-
-		/*
 		 * Check given memblock attribute by firmware which can affect
 		 * kernel memory layout. If zone==ZONE_MOVABLE but memory is
 		 * mirrored, it's an overlapped memmap init. skip it.
@@ -5456,6 +5447,12 @@ static void __meminit adjust_zone_range_for_zone_movable(int nid,
 			*zone_end_pfn = min(node_end_pfn,
 				arch_zone_highest_possible_pfn[movable_zone]);
 
+		/* Adjust for ZONE_MOVABLE starting within this range */
+		} else if (!mirrored_kernelcore &&
+			*zone_start_pfn < zone_movable_pfn[nid] &&
+			*zone_end_pfn > zone_movable_pfn[nid]) {
+			*zone_end_pfn = zone_movable_pfn[nid];
+
 		/* Check if this whole range is within ZONE_MOVABLE */
 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
 			*zone_start_pfn = *zone_end_pfn;
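
The branch added above is what makes the per-pfn skip deleted in the first hunk redundant: once ZONE_MOVABLE begins strictly inside a kernel zone's range, the range itself is truncated at zone_movable_pfn[nid], so memmap_init_zone() never walks a ZONE_NORMAL pfn at or beyond that boundary. A minimal userspace sketch of the clipping rule, assuming a single node; clip_zone_end and the pfn values are illustrative, not kernel code:

/* Sketch only: models the new "else if" branch above. Truncate a
 * kernel zone's span [start, end) at movable_start when ZONE_MOVABLE
 * begins strictly inside it. */
#include <assert.h>

static unsigned long clip_zone_end(unsigned long start, unsigned long end,
				   unsigned long movable_start)
{
	if (start < movable_start && end > movable_start)
		return movable_start;
	return end;
}

int main(void)
{
	/* Hypothetical node: kernel zone spans pfns [0x1000, 0x9000),
	 * ZONE_MOVABLE starts at pfn 0x5000. */
	assert(clip_zone_end(0x1000, 0x9000, 0x5000) == 0x5000);
	/* A range entirely above the boundary is left alone here; the
	 * kernel's following "else if" empties such a range instead. */
	assert(clip_zone_end(0x6000, 0x9000, 0x5000) == 0x9000);
	return 0;
}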
@@ -5559,28 +5556,23 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
 	 * and vice versa.
 	 */
-	if (zone_movable_pfn[nid]) {
-		if (mirrored_kernelcore) {
-			unsigned long start_pfn, end_pfn;
-			struct memblock_region *r;
-
-			for_each_memblock(memory, r) {
-				start_pfn = clamp(memblock_region_memory_base_pfn(r),
-						  zone_start_pfn, zone_end_pfn);
-				end_pfn = clamp(memblock_region_memory_end_pfn(r),
-						zone_start_pfn, zone_end_pfn);
-
-				if (zone_type == ZONE_MOVABLE &&
-				    memblock_is_mirror(r))
-					nr_absent += end_pfn - start_pfn;
-
-				if (zone_type == ZONE_NORMAL &&
-				    !memblock_is_mirror(r))
-					nr_absent += end_pfn - start_pfn;
-			}
-		} else {
-			if (zone_type == ZONE_NORMAL)
-				nr_absent += node_end_pfn - zone_movable_pfn[nid];
-		}
+	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
+		unsigned long start_pfn, end_pfn;
+		struct memblock_region *r;
+
+		for_each_memblock(memory, r) {
+			start_pfn = clamp(memblock_region_memory_base_pfn(r),
+					  zone_start_pfn, zone_end_pfn);
+			end_pfn = clamp(memblock_region_memory_end_pfn(r),
+					zone_start_pfn, zone_end_pfn);
+
+			if (zone_type == ZONE_MOVABLE &&
+			    memblock_is_mirror(r))
+				nr_absent += end_pfn - start_pfn;
+
+			if (zone_type == ZONE_NORMAL &&
+			    !memblock_is_mirror(r))
+				nr_absent += end_pfn - start_pfn;
 		}
 	}
 
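With kernel zones now clipped at zone_movable_pfn[nid] up front, the deleted else branch, which charged node_end_pfn - zone_movable_pfn[nid] to ZONE_NORMAL as absent pages, has nothing left to account for; only the mirrored_kernelcore case still needs per-region accounting. A self-contained sketch of that surviving logic with a made-up region layout; struct region, clamp_pfn, and the pfn values are illustrative stand-ins, not the kernel's memblock API:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in for a memblock region; values below are made up. */
struct region { unsigned long base_pfn, end_pfn; bool mirror; };

static unsigned long clamp_pfn(unsigned long v, unsigned long lo,
			       unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	const struct region regions[] = {
		{ 0x1000, 0x3000, true },	/* mirrored: stays in kernel zones */
		{ 0x3000, 0x8000, false },	/* not mirrored: goes to ZONE_MOVABLE */
	};
	unsigned long zone_start_pfn = 0x0, zone_end_pfn = 0x8000;
	unsigned long absent_movable = 0, absent_normal = 0;

	for (unsigned i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		unsigned long s = clamp_pfn(regions[i].base_pfn,
					    zone_start_pfn, zone_end_pfn);
		unsigned long e = clamp_pfn(regions[i].end_pfn,
					    zone_start_pfn, zone_end_pfn);

		/* Mirrored pages are absent from ZONE_MOVABLE; non-mirrored
		 * pages are absent from ZONE_NORMAL, matching the two "if"s
		 * kept in the hunk above. */
		if (regions[i].mirror)
			absent_movable += e - s;
		else
			absent_normal += e - s;
	}
	printf("absent in ZONE_MOVABLE: 0x%lx pages\n", absent_movable);
	printf("absent in ZONE_NORMAL:  0x%lx pages\n", absent_normal);
	return 0;
}

Compiled and run, this reports the 0x2000 mirrored pages as absent from ZONE_MOVABLE and the 0x5000 non-mirrored pages as absent from ZONE_NORMAL, mirroring how the two kept "if"s split the accounting.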