author     Dan Williams <dan.j.williams@intel.com>         2019-07-18 18:58:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-18 20:08:07 -0400
commit     e9c0a3f05477e18d2dae816cb61b62be1b7e90d3 (patch)
tree       2749369b31e1c646b076592ea63af34a7cc9079f /mm/sparse-vmemmap.c
parent     49ba3c6b37b38b58251c27864f551908c583e99d (diff)
mm/sparsemem: convert kmalloc_section_memmap() to populate_section_memmap()
Allow sub-section sized ranges to be added to the memmap.
populate_section_memmap() takes an explicit pfn range rather than
assuming a full section, and those parameters are plumbed all the way
through to vmemmap_populate(). There should be no sub-section usage in
current deployments. New warnings are added to clarify which memmap
allocation paths are sub-section capable.
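For reference, the rounding that __populate_section_memmap() applies below
can be reproduced in a standalone sketch. This is illustrative only: the
constants assume x86-64 values (4KB pages, 2MB sub-sections, so
PAGES_PER_SUBSECTION = 512), and the ALIGN() macro is a local stand-in for
the kernel's, not code from this patch.

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel definitions (x86-64 values). */
    #define PAGES_PER_SUBSECTION  512UL                        /* 2MB / 4KB */
    #define PAGE_SUBSECTION_MASK  (~(PAGES_PER_SUBSECTION - 1))
    #define ALIGN(x, a)           (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long pfn = 1000;   /* arbitrary, not sub-section aligned */
            unsigned long nr_pages = 100;

            /*
             * Round the requested pfn range out to whole sub-sections,
             * as __populate_section_memmap() does before handing the
             * range to vmemmap_populate().
             */
            unsigned long end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
            pfn &= PAGE_SUBSECTION_MASK;
            nr_pages = end - pfn;

            /* Prints "aligned: pfn 512, 1024 pages":
             * [512, 1536) covers the requested [1000, 1100). */
            printf("aligned: pfn %lu, %lu pages\n", pfn, nr_pages);
            return 0;
    }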
Link: http://lkml.kernel.org/r/156092352058.979959.6551283472062305149.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> [ppc64]
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richardw.yang@linux.intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/sparse-vmemmap.c')
-rw-r--r--  mm/sparse-vmemmap.c | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 7fec05796796..200aef686722 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -245,19 +245,26 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
 	return 0;
 }
 
-struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
-		struct vmem_altmap *altmap)
+struct page * __meminit __populate_section_memmap(unsigned long pfn,
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
 	unsigned long start;
 	unsigned long end;
-	struct page *map;
 
-	map = pfn_to_page(pnum * PAGES_PER_SECTION);
-	start = (unsigned long)map;
-	end = (unsigned long)(map + PAGES_PER_SECTION);
+	/*
+	 * The minimum granularity of memmap extensions is
+	 * PAGES_PER_SUBSECTION as allocations are tracked in the
+	 * 'subsection_map' bitmap of the section.
+	 */
+	end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
+	pfn &= PAGE_SUBSECTION_MASK;
+	nr_pages = end - pfn;
+
+	start = (unsigned long) pfn_to_page(pfn);
+	end = start + nr_pages * sizeof(struct page);
 
 	if (vmemmap_populate(start, end, nid, altmap))
 		return NULL;
 
-	return map;
+	return pfn_to_page(pfn);
 }
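For contrast, a sketch of how a call site changes under this conversion.
The caller shown is illustrative (the real call sites live in mm/sparse.c
and are not part of this hunk); it only reflects the two signatures above.

    /* Before: only a whole section could be populated, identified
     * by its section number. */
    map = sparse_mem_map_populate(pnum, nid, altmap);

    /* After: callers pass an explicit pfn range; a full section is
     * just the special case nr_pages == PAGES_PER_SECTION. */
    map = __populate_section_memmap(pfn, nr_pages, nid, altmap);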