author     Pavel Tatashin <pavel.tatashin@microsoft.com>   2018-10-26 18:10:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-10-26 19:38:15 -0400
commit     ec393a0f014eaf688a3dbe8c8a4cbb52d7f535f9 (patch)
tree       55f96f2ba77f8adec219a61800e1ad7eafcba470 /mm/page_alloc.c
parent     907ec5fca3dc38d37737de826f06f25b063aa08e (diff)
mm: return zero_resv_unavail optimization
When checking for valid pfns in zero_resv_unavail(), it is not necessary
to verify that every pfn within a pageblock_nr_pages range is valid; only
the first one needs to be checked. This is because memory for struct
pages is allocated in contiguous chunks that each contain
pageblock_nr_pages struct pages.
Link: http://lkml.kernel.org/r/20181002143821.5112-3-msys.mizuma@gmail.com
Signed-off-by: Pavel Tatashin <pavel.tatashin@microsoft.com>
Signed-off-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
Reviewed-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
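
To illustrate why probing one pfn per pageblock is enough, here is a minimal
userspace sketch of the same skip pattern (not kernel code: the pageblock
size, fake_pfn_valid(), and the memory layout are all hypothetical
stand-ins for pfn_valid() and the real memory map):

/* skip_demo.c - userspace sketch; build with: cc -O2 skip_demo.c */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL        /* assume 2MB pageblocks of 4K pages */
#define ALIGN_DOWN(x, a)   ((x) & ~((a) - 1))

static unsigned long probes;            /* counts validity checks performed */

/* Hypothetical stand-in for pfn_valid(): only the second pageblock is backed. */
static int fake_pfn_valid(unsigned long pfn)
{
        probes++;
        return pfn >= PAGEBLOCK_NR_PAGES && pfn < 2 * PAGEBLOCK_NR_PAGES;
}

/* Same shape as zero_pfn_range() in the patch, minus the actual zeroing. */
static unsigned long count_valid(unsigned long spfn, unsigned long epfn)
{
        unsigned long pfn, n = 0;

        for (pfn = spfn; pfn < epfn; pfn++) {
                if (!fake_pfn_valid(ALIGN_DOWN(pfn, PAGEBLOCK_NR_PAGES))) {
                        /* block's first pfn is invalid: jump to its last pfn */
                        pfn = ALIGN_DOWN(pfn, PAGEBLOCK_NR_PAGES)
                                + PAGEBLOCK_NR_PAGES - 1;
                        continue;
                }
                n++;
        }
        return n;
}

int main(void)
{
        unsigned long n = count_valid(0, 4 * PAGEBLOCK_NR_PAGES);

        /* prints: valid pfns: 512, probes: 515 (vs 2048 without the skip) */
        printf("valid pfns: %lu, probes: %lu\n", n, probes);
        return 0;
}

The payoff scales with the size of the holes: each invalid pageblock costs a
single validity probe instead of pageblock_nr_pages of them.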
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  46
1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6d863c5afa08..863d46da6586 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6509,6 +6509,29 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
 }
 
 #if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
+
+/*
+ * Zero all valid struct pages in range [spfn, epfn), return number of struct
+ * pages zeroed
+ */
+static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
+{
+        unsigned long pfn;
+        u64 pgcnt = 0;
+
+        for (pfn = spfn; pfn < epfn; pfn++) {
+                if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
+                        pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
+                                + pageblock_nr_pages - 1;
+                        continue;
+                }
+                mm_zero_struct_page(pfn_to_page(pfn));
+                pgcnt++;
+        }
+
+        return pgcnt;
+}
+
 /*
  * Only struct pages that are backed by physical memory are zeroed and
  * initialized by going through __init_single_page(). But, there are some
@@ -6524,7 +6547,6 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
 void __init zero_resv_unavail(void)
 {
         phys_addr_t start, end;
-        unsigned long pfn;
         u64 i, pgcnt;
         phys_addr_t next = 0;
 
@@ -6534,34 +6556,18 @@ void __init zero_resv_unavail(void)
         pgcnt = 0;
         for_each_mem_range(i, &memblock.memory, NULL,
                         NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
-                if (next < start) {
-                        for (pfn = PFN_DOWN(next); pfn < PFN_UP(start); pfn++) {
-                                if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
-                                        continue;
-                                mm_zero_struct_page(pfn_to_page(pfn));
-                                pgcnt++;
-                        }
-                }
+                if (next < start)
+                        pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start));
                 next = end;
         }
-        for (pfn = PFN_DOWN(next); pfn < max_pfn; pfn++) {
-                if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
-                        continue;
-                mm_zero_struct_page(pfn_to_page(pfn));
-                pgcnt++;
-        }
-
+        pgcnt += zero_pfn_range(PFN_DOWN(next), max_pfn);
 
         /*
          * Struct pages that do not have backing memory. This could be because
          * firmware is using some of this memory, or for some other reasons.
-         * Once memblock is changed so such behaviour is not allowed: i.e.
-         * list of "reserved" memory must be a subset of list of "memory", then
-         * this code can be removed.
          */
         if (pgcnt)
                 pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
-
 }
 #endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
 
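
The refactor also makes the hole-walking structure of zero_resv_unavail()
easier to see: it visits the gap before each memory range, then the tail gap
up to max_pfn. A minimal userspace sketch of that pattern, assuming a sorted
array of [start, end) pfn ranges in place of for_each_mem_range() (all names
here are illustrative, not kernel API):

/* holes_demo.c - userspace sketch; build with: cc -O2 holes_demo.c */
#include <stdio.h>

struct range { unsigned long start, end; };

/* Stand-in for zero_pfn_range(): just report the hole it was handed. */
static void visit_gap(unsigned long spfn, unsigned long epfn)
{
        printf("hole: [%lu, %lu)\n", spfn, epfn);
}

static void walk_holes(const struct range *r, int n, unsigned long max_pfn)
{
        unsigned long next = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (next < r[i].start)          /* gap before this range */
                        visit_gap(next, r[i].start);
                next = r[i].end;                /* resume after the range */
        }
        if (next < max_pfn)                     /* trailing gap up to max_pfn */
                visit_gap(next, max_pfn);
}

int main(void)
{
        const struct range mem[] = { { 16, 64 }, { 128, 256 } };

        walk_holes(mem, 2, 512);   /* prints [0,16), [64,128), [256,512) */
        return 0;
}

Because the ranges arrive sorted, a single cursor (next) is enough to cover
every hole exactly once, which is what lets the patch collapse both original
loops into two calls to the shared helper.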