aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2019-07-18 18:58:00 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-07-18 20:08:07 -0400
commit326e1b8f83a4318b09033ef754f40c785aed5e68 (patch)
tree866ad9a9dad9ff97b3da82ff72e339524aa499c3 /mm
parentf1eca35a0dc7cb3cdb00c88c8c5e5138a65face0 (diff)
mm/sparsemem: introduce a SECTION_IS_EARLY flag
In preparation for sub-section hotplug, track whether a given section was created during early memory initialization, or later via memory hotplug. This distinction is needed to maintain the coarse expectation that pfn_valid() returns true for any pfn within a given section even if that section has pages that are reserved from the page allocator. For example one of the goals of subsection hotplug is to support cases where the system physical memory layout collides System RAM and PMEM within a section. Several pfn_valid() users expect to just check if a section is valid, but they are not careful to check if the given pfn is within a "System RAM" boundary and instead expect pgdat information to further validate the pfn. Rather than unwind those paths to make their pfn_valid() queries more precise a follow on patch uses the SECTION_IS_EARLY flag to maintain the traditional expectation that pfn_valid() returns true for all early sections. Link: https://lore.kernel.org/lkml/1560366952-10660-1-git-send-email-cai@lca.pw/ Link: http://lkml.kernel.org/r/156092350358.979959.5817209875548072819.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams <dan.j.williams@intel.com> Reported-by: Qian Cai <cai@lca.pw> Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> [ppc64] Reviewed-by: Oscar Salvador <osalvador@suse.de> Cc: Michal Hocko <mhocko@suse.com> Cc: Logan Gunthorpe <logang@deltatee.com> Cc: David Hildenbrand <david@redhat.com> Cc: Pavel Tatashin <pasha.tatashin@soleen.com> Cc: Jane Chu <jane.chu@oracle.com> Cc: Jeff Moyer <jmoyer@redhat.com> Cc: Jérôme Glisse <jglisse@redhat.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Mike Rapoport <rppt@linux.ibm.com> Cc: Toshi Kani <toshi.kani@hpe.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Wei Yang <richardw.yang@linux.intel.com> Cc: Jason Gunthorpe <jgg@mellanox.com> Cc: Christoph Hellwig <hch@lst.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/sparse.c20
1 file changed, 9 insertions, 11 deletions
diff --git a/mm/sparse.c b/mm/sparse.c
index 41bef8e1f65c..6d23a526279a 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -288,11 +288,11 @@ struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pn
288 288
289static void __meminit sparse_init_one_section(struct mem_section *ms, 289static void __meminit sparse_init_one_section(struct mem_section *ms,
290 unsigned long pnum, struct page *mem_map, 290 unsigned long pnum, struct page *mem_map,
291 struct mem_section_usage *usage) 291 struct mem_section_usage *usage, unsigned long flags)
292{ 292{
293 ms->section_mem_map &= ~SECTION_MAP_MASK; 293 ms->section_mem_map &= ~SECTION_MAP_MASK;
294 ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) | 294 ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
295 SECTION_HAS_MEM_MAP; 295 | SECTION_HAS_MEM_MAP | flags;
296 ms->usage = usage; 296 ms->usage = usage;
297} 297}
298 298
@@ -497,7 +497,8 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
497 goto failed; 497 goto failed;
498 } 498 }
499 check_usemap_section_nr(nid, usage); 499 check_usemap_section_nr(nid, usage);
500 sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage); 500 sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
501 SECTION_IS_EARLY);
501 usage = (void *) usage + mem_section_usage_size(); 502 usage = (void *) usage + mem_section_usage_size();
502 } 503 }
503 sparse_buffer_fini(); 504 sparse_buffer_fini();
@@ -732,7 +733,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
732 733
733 set_section_nid(section_nr, nid); 734 set_section_nid(section_nr, nid);
734 section_mark_present(ms); 735 section_mark_present(ms);
735 sparse_init_one_section(ms, section_nr, memmap, usage); 736 sparse_init_one_section(ms, section_nr, memmap, usage, 0);
736 737
737out: 738out:
738 if (ret < 0) { 739 if (ret < 0) {
@@ -772,19 +773,16 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
772} 773}
773#endif 774#endif
774 775
775static void free_section_usage(struct page *memmap, 776static void free_section_usage(struct mem_section *ms, struct page *memmap,
776 struct mem_section_usage *usage, struct vmem_altmap *altmap) 777 struct mem_section_usage *usage, struct vmem_altmap *altmap)
777{ 778{
778 struct page *usage_page;
779
780 if (!usage) 779 if (!usage)
781 return; 780 return;
782 781
783 usage_page = virt_to_page(usage);
784 /* 782 /*
785 * Check to see if allocation came from hot-plug-add 783 * Check to see if allocation came from hot-plug-add
786 */ 784 */
787 if (PageSlab(usage_page) || PageCompound(usage_page)) { 785 if (!early_section(ms)) {
788 kfree(usage); 786 kfree(usage);
789 if (memmap) 787 if (memmap)
790 __kfree_section_memmap(memmap, altmap); 788 __kfree_section_memmap(memmap, altmap);
@@ -816,6 +814,6 @@ void sparse_remove_one_section(struct mem_section *ms, unsigned long map_offset,
816 814
817 clear_hwpoisoned_pages(memmap + map_offset, 815 clear_hwpoisoned_pages(memmap + map_offset,
818 PAGES_PER_SECTION - map_offset); 816 PAGES_PER_SECTION - map_offset);
819 free_section_usage(memmap, usage, altmap); 817 free_section_usage(ms, memmap, usage, altmap);
820} 818}
821#endif /* CONFIG_MEMORY_HOTPLUG */ 819#endif /* CONFIG_MEMORY_HOTPLUG */