summaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorAndrew Morton <akpm@linux-foundation.org>2016-03-15 17:55:25 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-15 19:55:16 -0400
commitb72d0ffb5dbc4070089b36230b98687ca4577cbc (patch)
treea10a1f4622656e5444bf8e04fe78080a42621953 /mm/page_alloc.c
parent342332e6a925e9ed015e5465062c38d2b86ec8f9 (diff)
mm/page_alloc.c: rework code layout in memmap_init_zone()
This function is getting full of weird tricks to avoid word-wrapping. Use a goto to eliminate a tab stop then use the new space. Cc: Taku Izumi <izumi.taku@jp.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c79
1 file changed, 38 insertions, 41 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b8160b9d5e72..fe4378fc0ab6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4508,54 +4508,51 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4508 4508
4509 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 4509 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4510 /* 4510 /*
4511 * There can be holes in boot-time mem_map[]s 4511 * There can be holes in boot-time mem_map[]s handed to this
4512 * handed to this function. They do not 4512 * function. They do not exist on hotplugged memory.
4513 * exist on hotplugged memory.
4514 */ 4513 */
4515 if (context == MEMMAP_EARLY) { 4514 if (context != MEMMAP_EARLY)
4516 if (!early_pfn_valid(pfn)) 4515 goto not_early;
4517 continue; 4516
4518 if (!early_pfn_in_nid(pfn, nid)) 4517 if (!early_pfn_valid(pfn))
4519 continue; 4518 continue;
4520 if (!update_defer_init(pgdat, pfn, end_pfn, 4519 if (!early_pfn_in_nid(pfn, nid))
4521 &nr_initialised)) 4520 continue;
4522 break; 4521 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
4522 break;
4523 4523
4524#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4524#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4525 /* 4525 /*
4526 * if not mirrored_kernelcore and ZONE_MOVABLE exists, 4526 * If not mirrored_kernelcore and ZONE_MOVABLE exists, range
4527 * range from zone_movable_pfn[nid] to end of each node 4527 * from zone_movable_pfn[nid] to end of each node should be
4528 * should be ZONE_MOVABLE not ZONE_NORMAL. skip it. 4528 * ZONE_MOVABLE not ZONE_NORMAL. skip it.
4529 */ 4529 */
4530 if (!mirrored_kernelcore && zone_movable_pfn[nid]) 4530 if (!mirrored_kernelcore && zone_movable_pfn[nid])
4531 if (zone == ZONE_NORMAL && 4531 if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
4532 pfn >= zone_movable_pfn[nid]) 4532 continue;
4533 continue;
4534 4533
4535 /* 4534 /*
4536 * check given memblock attribute by firmware which 4535 * Check given memblock attribute by firmware which can affect
4537 * can affect kernel memory layout. 4536 * kernel memory layout. If zone==ZONE_MOVABLE but memory is
4538 * if zone==ZONE_MOVABLE but memory is mirrored, 4537 * mirrored, it's an overlapped memmap init. skip it.
4539 * it's an overlapped memmap init. skip it. 4538 */
4540 */ 4539 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
4541 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 4540 if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
4542 if (!r || 4541 for_each_memblock(memory, tmp)
4543 pfn >= memblock_region_memory_end_pfn(r)) { 4542 if (pfn < memblock_region_memory_end_pfn(tmp))
4544 for_each_memblock(memory, tmp) 4543 break;
4545 if (pfn < memblock_region_memory_end_pfn(tmp)) 4544 r = tmp;
4546 break; 4545 }
4547 r = tmp; 4546 if (pfn >= memblock_region_memory_base_pfn(r) &&
4548 } 4547 memblock_is_mirror(r)) {
4549 if (pfn >= memblock_region_memory_base_pfn(r) && 4548 /* already initialized as NORMAL */
4550 memblock_is_mirror(r)) { 4549 pfn = memblock_region_memory_end_pfn(r);
4551 /* already initialized as NORMAL */ 4550 continue;
4552 pfn = memblock_region_memory_end_pfn(r);
4553 continue;
4554 }
4555 } 4551 }
4556#endif
4557 } 4552 }
4553#endif
4558 4554
4555not_early:
4559 /* 4556 /*
4560 * Mark the block movable so that blocks are reserved for 4557 * Mark the block movable so that blocks are reserved for
4561 * movable at startup. This will force kernel allocations 4558 * movable at startup. This will force kernel allocations