author      Christoph Hellwig <hch@lst.de>              2017-12-29 02:53:58 -0500
committer   Dan Williams <dan.j.williams@intel.com>     2018-01-08 14:46:23 -0500
commit      a8fc357b2875da8732c91eb085862a0648d82767 (patch)
tree        f0c78fe50e93716475f91968dcbba9ff0d3166b4 /mm/sparse-vmemmap.c
parent      a99583e780c751003ac9c0105eec9a3b23ec3bc4 (diff)
mm: split altmap memory map allocation from normal case
No functional changes, just untangling the call chain and documenting
why the altmap is passed around the hotplug code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'mm/sparse-vmemmap.c')
-rw-r--r--   mm/sparse-vmemmap.c   15
1 file changed, 3 insertions, 12 deletions
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 376dcf05a39c..d012c9e2811b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -74,7 +74,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 }
 
 /* need to make sure size is all the same during early stage */
-static void * __meminit alloc_block_buf(unsigned long size, int node)
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
 {
 	void *ptr;
 
@@ -129,7 +129,7 @@ static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
 	return pfn + nr_align;
 }
 
-static void * __meminit altmap_alloc_block_buf(unsigned long size,
+void * __meminit altmap_alloc_block_buf(unsigned long size,
 		struct vmem_altmap *altmap)
 {
 	unsigned long pfn, nr_pfns;
@@ -153,15 +153,6 @@ static void * __meminit altmap_alloc_block_buf(unsigned long size,
 	return ptr;
 }
 
-/* need to make sure size is all the same during early stage */
-void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
-		struct vmem_altmap *altmap)
-{
-	if (altmap)
-		return altmap_alloc_block_buf(size, altmap);
-	return alloc_block_buf(size, node);
-}
-
 void __meminit vmemmap_verify(pte_t *pte, int node,
 		unsigned long start, unsigned long end)
 {
@@ -178,7 +169,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 	pte_t *pte = pte_offset_kernel(pmd, addr);
 	if (pte_none(*pte)) {
 		pte_t entry;
-		void *p = alloc_block_buf(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
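As a quick orientation to the hunks above: the patch drops the __vmemmap_alloc_block_buf() wrapper, so code that needs a buffered memmap allocation now chooses between the two exported helpers itself. A minimal sketch of that pattern, assuming a caller that already has altmap and node in scope (the real call sites live in the arch/hotplug code, not in this file):

	void *p;

	if (altmap)
		/* back the struct pages with the device-provided altmap reserve */
		p = altmap_alloc_block_buf(PAGE_SIZE, altmap);
	else
		/* normal case: buffered allocation from memory on @node */
		p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
	if (!p)
		return NULL;

This is just the body of the removed wrapper inlined at the call site, which is why the change is purely structural: the altmap check moves to the callers, making it explicit where and why the altmap is passed down.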