author	Xishi Qiu <qiuxishi@huawei.com>	2016-10-07 19:58:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-07 21:46:27 -0400
commit	e506b99696a296e9aba2e5f3bc5768aa7d8e2396 (patch)
tree	fcde29a0d6522c6795a66f25ba1df621a46b7e64 /mm/page_alloc.c
parent	fdd4c6149a71ff1da98317adb6f18c28f75a6e3f (diff)
mem-hotplug: fix node spanned pages when we have a movable node
Commit 342332e6a925 ("mm/page_alloc.c: introduce kernelcore=mirror option")
rewrote the calculation of node spanned pages. But when we have a movable
node, the node's spanned pages are counted twice: the node ends up with an
empty Normal zone whose present pages are zero but whose spanned pages are
not.

e.g.

  Zone ranges:
    DMA      [mem 0x0000000000001000-0x0000000000ffffff]
    DMA32    [mem 0x0000000001000000-0x00000000ffffffff]
    Normal   [mem 0x0000000100000000-0x0000007c7fffffff]
  Movable zone start for each node
    Node 1: 0x0000001080000000
    Node 2: 0x0000002080000000
    Node 3: 0x0000003080000000
    Node 4: 0x0000003c80000000
    Node 5: 0x0000004c80000000
    Node 6: 0x0000005c80000000
  Early memory node ranges
    node   0: [mem 0x0000000000001000-0x000000000009ffff]
    node   0: [mem 0x0000000000100000-0x000000007552afff]
    node   0: [mem 0x000000007bd46000-0x000000007bd46fff]
    node   0: [mem 0x000000007bdcd000-0x000000007bffffff]
    node   0: [mem 0x0000000100000000-0x000000107fffffff]
    node   1: [mem 0x0000001080000000-0x000000207fffffff]
    node   2: [mem 0x0000002080000000-0x000000307fffffff]
    node   3: [mem 0x0000003080000000-0x0000003c7fffffff]
    node   4: [mem 0x0000003c80000000-0x0000004c7fffffff]
    node   5: [mem 0x0000004c80000000-0x0000005c7fffffff]
    node   6: [mem 0x0000005c80000000-0x0000006c7fffffff]
    node   7: [mem 0x0000006c80000000-0x0000007c7fffffff]

  node 1 before the patch:
    Normal,  start=0x1080000, present=0x0,       spanned=0x1000000
    Movable, start=0x1080000, present=0x1000000, spanned=0x1000000
    pgdat,   start=0x1080000, present=0x1000000, spanned=0x2000000

After this patch, the problem is fixed:

  node 1:
    Normal,  start=0x0,       present=0x0,       spanned=0x0
    Movable, start=0x1080000, present=0x1000000, spanned=0x1000000
    pgdat,   start=0x1080000, present=0x1000000, spanned=0x1000000

Link: http://lkml.kernel.org/r/57A325E8.6070100@huawei.com
Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Cc: Taku Izumi <izumi.taku@jp.fujitsu.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
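The double count is easiest to see as arithmetic. Below is a minimal
userspace sketch (not kernel code; the figures are the node 1 numbers from
the log above) of how the pgdat spanned total is accumulated as the sum of
the zones' spanned pages, so an empty-but-spanning Normal zone inflates it:

#include <stdio.h>

int main(void)
{
	/* Node 1 figures from the log above, in 4K pages. */
	unsigned long movable_spanned = 0x1000000;

	/* Before the fix: the empty Normal zone still spans the movable range. */
	unsigned long normal_spanned = 0x1000000;	/* present = 0 */
	printf("pgdat spanned before: %#lx\n",
	       normal_spanned + movable_spanned);	/* 0x2000000: doubled */

	/* After the fix: the empty Normal zone spans nothing. */
	normal_spanned = 0;
	printf("pgdat spanned after:  %#lx\n",
	       normal_spanned + movable_spanned);	/* 0x1000000: correct */
	return 0;
}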
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	54
1 file changed, 23 insertions(+), 31 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c988d324e3f6..26246fdf45b5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5005,15 +5005,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 		/*
-		 * If not mirrored_kernelcore and ZONE_MOVABLE exists, range
-		 * from zone_movable_pfn[nid] to end of each node should be
-		 * ZONE_MOVABLE not ZONE_NORMAL. skip it.
-		 */
-		if (!mirrored_kernelcore && zone_movable_pfn[nid])
-			if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
-				continue;
-
-		/*
 		 * Check given memblock attribute by firmware which can affect
 		 * kernel memory layout. If zone==ZONE_MOVABLE but memory is
 		 * mirrored, it's an overlapped memmap init. skip it.
@@ -5456,6 +5447,12 @@ static void __meminit adjust_zone_range_for_zone_movable(int nid,
 		*zone_end_pfn = min(node_end_pfn,
 			arch_zone_highest_possible_pfn[movable_zone]);
 
+	/* Adjust for ZONE_MOVABLE starting within this range */
+	} else if (!mirrored_kernelcore &&
+		*zone_start_pfn < zone_movable_pfn[nid] &&
+		*zone_end_pfn > zone_movable_pfn[nid]) {
+		*zone_end_pfn = zone_movable_pfn[nid];
+
 	/* Check if this whole range is within ZONE_MOVABLE */
 	} else if (*zone_start_pfn >= zone_movable_pfn[nid])
 		*zone_start_pfn = *zone_end_pfn;
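The effect of the two movable-boundary cases can be checked with a small
userspace sketch; clamp_to_movable() below is a hypothetical stand-in for
the corresponding branches of adjust_zone_range_for_zone_movable(),
assuming !mirrored_kernelcore, with inputs taken from the node 1 PFNs in
the log above:

#include <stdio.h>

/* Hypothetical stand-in for the movable-boundary branches above. */
static void clamp_to_movable(unsigned long movable_pfn,
			     unsigned long *start, unsigned long *end)
{
	if (*start < movable_pfn && *end > movable_pfn)
		*end = movable_pfn;	/* Normal ends where Movable begins */
	else if (*start >= movable_pfn)
		*start = *end;		/* whole range is Movable: empty it */
}

int main(void)
{
	/* Node 1: Normal candidate range vs. zone_movable_pfn[1]. */
	unsigned long start = 0x1080000, end = 0x2080000;

	clamp_to_movable(0x1080000, &start, &end);
	printf("Normal spanned = %#lx\n", end - start);	/* 0x0, as in the log */
	return 0;
}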
@@ -5559,28 +5556,23 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
 	 * and vice versa.
 	 */
-	if (zone_movable_pfn[nid]) {
-		if (mirrored_kernelcore) {
-			unsigned long start_pfn, end_pfn;
-			struct memblock_region *r;
-
-			for_each_memblock(memory, r) {
-				start_pfn = clamp(memblock_region_memory_base_pfn(r),
-						  zone_start_pfn, zone_end_pfn);
-				end_pfn = clamp(memblock_region_memory_end_pfn(r),
-						zone_start_pfn, zone_end_pfn);
-
-				if (zone_type == ZONE_MOVABLE &&
-				    memblock_is_mirror(r))
-					nr_absent += end_pfn - start_pfn;
-
-				if (zone_type == ZONE_NORMAL &&
-				    !memblock_is_mirror(r))
-					nr_absent += end_pfn - start_pfn;
-			}
-		} else {
-			if (zone_type == ZONE_NORMAL)
-				nr_absent += node_end_pfn - zone_movable_pfn[nid];
+	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
+		unsigned long start_pfn, end_pfn;
+		struct memblock_region *r;
+
+		for_each_memblock(memory, r) {
+			start_pfn = clamp(memblock_region_memory_base_pfn(r),
+					  zone_start_pfn, zone_end_pfn);
+			end_pfn = clamp(memblock_region_memory_end_pfn(r),
+					zone_start_pfn, zone_end_pfn);
+
+			if (zone_type == ZONE_MOVABLE &&
+			    memblock_is_mirror(r))
+				nr_absent += end_pfn - start_pfn;
+
+			if (zone_type == ZONE_NORMAL &&
+			    !memblock_is_mirror(r))
+				nr_absent += end_pfn - start_pfn;
 		}
 	}
 
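Finally, the dropped else branch: before the patch, a movable node's Normal
zone spanned the whole node, so zone_absent_pages_in_node() had to report
the movable range as absent to keep present pages at zero; with the spanned
range now clamped at its source, there is nothing left to subtract. A
sketch of that bookkeeping (not kernel code), again using the node 1
figures from the log:

#include <stdio.h>

int main(void)
{
	/* Node 1 PFNs from the log above. */
	unsigned long node_start = 0x1080000, node_end = 0x2080000;
	unsigned long movable_start = 0x1080000;

	/* Old scheme: Normal spans the node, absent pages compensate. */
	unsigned long old_spanned = node_end - node_start;	/* 0x1000000 */
	unsigned long old_absent = node_end - movable_start;	/* 0x1000000 */
	printf("old Normal present: %#lx\n", old_spanned - old_absent);

	/* New scheme: Normal spans nothing, so nothing needs subtracting. */
	printf("new Normal present: %#lx\n", 0UL);
	return 0;
}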