author    | Paul Burton <paul.burton@imgtec.com> | 2017-02-22 18:44:53 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-22 19:41:29 -0500
commit    | b92df1de5d289c0b5d653e72414bf0850b8511e0
tree      | 6604acfffcd8f9ca58499f892531bd08dc117fea /mm/page_alloc.c
parent    | 7f354a548d1cb6bb01b6ee74aee9264aa152f1ec
mm: page_alloc: skip over regions of invalid pfns where possible
When using a sparse memory model, memmap_init_zone() invoked with the
MEMMAP_EARLY context will skip over pages which aren't valid, i.e.
which aren't in a populated region of the sparse memory map. However,
if the memory map is extremely sparse then it can spend a long time
linearly checking each PFN in a large non-populated region of the
memory map and skipping it in turn.
When CONFIG_HAVE_MEMBLOCK_NODE_MAP is enabled, we have sufficient
information to quickly discover the next valid PFN given an invalid one
by searching through the list of memory regions and skipping forwards to
the first PFN covered by the memory region to the right of the
non-populated region. Implement this in order to speed up
memmap_init_zone() for systems with extremely sparse memory maps.
James said "I have tested this patch on a virtual model of a Samurai CPU
with a sparse memory map. The kernel boot time drops from 109 to
62 seconds."
Link: http://lkml.kernel.org/r/20161125185518.29885-1-paul.burton@imgtec.com
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Tested-by: James Hartley <james.hartley@imgtec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
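
The memblock_next_valid_pfn() helper called by the hunk below is added
outside mm/page_alloc.c, so its body is not part of this diffstat-limited
view. As a rough sketch of the region-list lookup the commit message
describes (illustrative only, not the code added by the patch), such a
helper could binary-search the sorted memblock.memory regions and, when
the given PFN falls in a hole, return the first PFN of the region to its
right. The function name below is made up for illustration; struct
memblock_type, memblock.memory and the PFN_PHYS()/PHYS_PFN() helpers are
the usual memblock/pfn definitions assumed here.

/*
 * Illustrative sketch only -- not the code added by this commit.
 * Assumes memblock.memory regions are sorted and non-overlapping.
 */
static unsigned long next_valid_pfn_sketch(unsigned long pfn,
					   unsigned long max_pfn)
{
	struct memblock_type *type = &memblock.memory;
	unsigned int left = 0, right = type->cnt, mid;
	phys_addr_t addr = PFN_PHYS(++pfn);

	/* Binary search for the region containing addr. */
	do {
		mid = (left + right) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= type->regions[mid].base +
				 type->regions[mid].size)
			left = mid + 1;
		else
			return pfn;	/* pfn is in a populated region */
	} while (left < right);

	/* No populated region to the right: clamp to the caller's limit. */
	if (right == type->cnt)
		return max_pfn;

	/* Otherwise skip to the first pfn of the next populated region. */
	return PHYS_PFN(type->regions[right].base);
}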
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 05c0a59323bd..6da3169d3750 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5103,8 +5103,17 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		if (context != MEMMAP_EARLY)
 			goto not_early;
 
-		if (!early_pfn_valid(pfn))
+		if (!early_pfn_valid(pfn)) {
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+			/*
+			 * Skip to the pfn preceding the next valid one (or
+			 * end_pfn), such that we hit a valid pfn (or end_pfn)
+			 * on our next iteration of the loop.
+			 */
+			pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
+#endif
 			continue;
+		}
 		if (!early_pfn_in_nid(pfn, nid))
 			continue;
 		if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
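
Note the "- 1": the change sits inside memmap_init_zone()'s PFN loop,
which increments pfn on every iteration, so returning one PFN short makes
the loop's increment land exactly on the next valid PFN (or on end_pfn,
which ends the loop). A simplified view of the loop shape, with most of
the body elided:

/* Simplified shape of the surrounding loop; most of the body elided. */
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
	if (!early_pfn_valid(pfn)) {
		/*
		 * Jump to one pfn before the next valid one; the loop's
		 * pfn++ then lands on that valid pfn (or on end_pfn,
		 * which terminates the loop).
		 */
		pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
		continue;
	}
	/* ... initialise the struct page backing this pfn ... */
}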