aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--  arch/powerpc/mm/init_64.c | 62 ++++++++++++++++++++++++++++++++----------
 1 file changed, 52 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index e3734edffa69..fa5d28b4e726 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -226,14 +226,24 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
 #endif /* CONFIG_PPC_BOOK3E */
 
 struct vmemmap_backing *vmemmap_list;
+static struct vmemmap_backing *next;
+static int num_left;
+static int num_freed;
 
 static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
 {
-	static struct vmemmap_backing *next;
-	static int num_left;
+	struct vmemmap_backing *vmem_back;
+	/* get from freed entries first */
+	if (num_freed) {
+		num_freed--;
+		vmem_back = next;
+		next = next->list;
+
+		return vmem_back;
+	}
 
 	/* allocate a page when required and hand out chunks */
-	if (!next || !num_left) {
+	if (!num_left) {
 		next = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (unlikely(!next)) {
 			WARN_ON(1);
@@ -266,6 +276,38 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
 	vmemmap_list = vmem_back;
 }
 
+static unsigned long vmemmap_list_free(unsigned long start)
+{
+	struct vmemmap_backing *vmem_back, *vmem_back_prev;
+
+	vmem_back_prev = vmem_back = vmemmap_list;
+
+	/* look for it with prev pointer recorded */
+	for (; vmem_back; vmem_back = vmem_back->list) {
+		if (vmem_back->virt_addr == start)
+			break;
+		vmem_back_prev = vmem_back;
+	}
+
+	if (unlikely(!vmem_back)) {
+		WARN_ON(1);
+		return 0;
+	}
+
+	/* remove it from vmemmap_list */
+	if (vmem_back == vmemmap_list) /* remove head */
+		vmemmap_list = vmem_back->list;
+	else
+		vmem_back_prev->list = vmem_back->list;
+
+	/* next point to this freed entry */
+	vmem_back->list = next;
+	next = vmem_back;
+	num_freed++;
+
+	return vmem_back->phys;
+}
+
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
@@ -331,16 +373,16 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
 		if (pg_va < vmem_back->virt_addr)
 			continue;
 
-		/* Check that page struct is not split between real pages */
-		if ((pg_va + sizeof(struct page)) >
-				(vmem_back->virt_addr + page_size))
-			return NULL;
-
-		page = (struct page *) (vmem_back->phys + pg_va -
+		/* After vmemmap_list entry free is possible, need check all */
+		if ((pg_va + sizeof(struct page)) <=
+		    (vmem_back->virt_addr + page_size)) {
+			page = (struct page *) (vmem_back->phys + pg_va -
 				vmem_back->virt_addr);
-		return page;
+			return page;
+		}
 	}
 
+	/* Probably that page struct is split between real pages */
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(realmode_pfn_to_page);