Diffstat (limited to 'mm/sparse-vmemmap.c')
 mm/sparse-vmemmap.c | 67 +++++++++++++++++++++++--------------------------------------------
 1 file changed, 23 insertions(+), 44 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 17acf01791fa..bd0276d5f66b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -74,7 +74,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 }
 
 /* need to make sure size is all the same during early stage */
-static void * __meminit alloc_block_buf(unsigned long size, int node)
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
 {
 	void *ptr;
 
@@ -107,33 +107,16 @@ static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
 }
 
 /**
- * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
- * @altmap - reserved page pool for the allocation
- * @nr_pfns - size (in pages) of the allocation
+ * altmap_alloc_block_buf - allocate pages from the device page map
+ * @altmap: device page map
+ * @size: size (in bytes) of the allocation
  *
  * Allocations are aligned to the size of the request.
  */
-static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
-		unsigned long nr_pfns)
-{
-	unsigned long pfn = vmem_altmap_next_pfn(altmap);
-	unsigned long nr_align;
-
-	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
-	nr_align = ALIGN(pfn, nr_align) - pfn;
-
-	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
-		return ULONG_MAX;
-	altmap->alloc += nr_pfns;
-	altmap->align += nr_align;
-	return pfn + nr_align;
-}
-
-static void * __meminit altmap_alloc_block_buf(unsigned long size,
+void * __meminit altmap_alloc_block_buf(unsigned long size,
 		struct vmem_altmap *altmap)
 {
-	unsigned long pfn, nr_pfns;
-	void *ptr;
+	unsigned long pfn, nr_pfns, nr_align;
 
 	if (size & ~PAGE_MASK) {
 		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
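
The helper removed above computed the allocation's natural alignment: the largest power of two that divides nr_pfns. The next hunk re-inlines the same math into altmap_alloc_block_buf(). A minimal userspace sketch of that math, not part of the patch, with made-up input values and __builtin_ctzl() standing in for the kernel's find_first_bit():

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* hypothetical inputs: next free pfn and a 2MB (512-page) request */
	unsigned long pfn = 0x1003;
	unsigned long nr_pfns = 512;

	/* largest power of two that divides nr_pfns */
	unsigned long nr_align = 1UL << __builtin_ctzl(nr_pfns);

	/* pages of padding needed to reach that alignment */
	nr_align = ALIGN(pfn, nr_align) - pfn;

	/* prints: pad 509 pfns, allocation starts at pfn 0x1200 */
	printf("pad %lu pfns, allocation starts at pfn %#lx\n",
	       nr_align, pfn + nr_align);
	return 0;
}
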
@@ -141,25 +124,20 @@ static void * __meminit altmap_alloc_block_buf(unsigned long size,
 		return NULL;
 	}
 
+	pfn = vmem_altmap_next_pfn(altmap);
 	nr_pfns = size >> PAGE_SHIFT;
-	pfn = vmem_altmap_alloc(altmap, nr_pfns);
-	if (pfn < ULONG_MAX)
-		ptr = __va(__pfn_to_phys(pfn));
-	else
-		ptr = NULL;
-	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
-			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
+	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
+	nr_align = ALIGN(pfn, nr_align) - pfn;
+	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
+		return NULL;
 
-	return ptr;
-}
+	altmap->alloc += nr_pfns;
+	altmap->align += nr_align;
+	pfn += nr_align;
 
-/* need to make sure size is all the same during early stage */
-void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
-		struct vmem_altmap *altmap)
-{
-	if (altmap)
-		return altmap_alloc_block_buf(size, altmap);
-	return alloc_block_buf(size, node);
-}
+	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
+			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
+	return __va(__pfn_to_phys(pfn));
+}
 
 void __meminit vmemmap_verify(pte_t *pte, int node,
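
With the __vmemmap_alloc_block_buf() wrapper removed above, the choice between the two allocators moves out to the call sites. A hypothetical caller, not shown in this patch, would now mirror the removed wrapper's body:

/* Hypothetical call site under the new split: pick the device-map
 * allocator when an altmap is supplied, otherwise fall back to the
 * early-boot node allocator, as __vmemmap_alloc_block_buf() used to
 * do internally.
 */
void *p;

if (altmap)
	p = altmap_alloc_block_buf(size, altmap);
else
	p = vmemmap_alloc_block_buf(size, node);
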
@@ -178,7 +156,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 	pte_t *pte = pte_offset_kernel(pmd, addr);
 	if (pte_none(*pte)) {
 		pte_t entry;
-		void *p = alloc_block_buf(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
@@ -278,7 +256,8 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
 	return 0;
 }
 
-struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
+struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	unsigned long start;
 	unsigned long end;
@@ -288,7 +267,7 @@ struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
 	start = (unsigned long)map;
 	end = (unsigned long)(map + PAGES_PER_SECTION);
 
-	if (vmemmap_populate(start, end, nid))
+	if (vmemmap_populate(start, end, nid, altmap))
 		return NULL;
 
 	return map;
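
The altmap argument threaded into vmemmap_populate() here implies a matching prototype change outside this file; assuming the declaration lives in include/linux/mm.h, it would become:

/* Implied by the new call above; the actual declaration change lands
 * in the corresponding header patch, not in mm/sparse-vmemmap.c.
 */
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap);
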
@@ -318,7 +297,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		if (!present_section_nr(pnum))
 			continue;
 
-		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);