author     Li Zhong <zhong@linux.vnet.ibm.com>                 2014-06-11 04:23:38 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2014-08-05 02:34:19 -0400
commit     71b0bfe4f1608dbabb54a1e964046267a2c7f7b3 (patch)
tree       5497483b21288864af24df90b4b7d166ffbf9bb8
parent     ed5694a8464a133582c632f8081324408bcc486d (diff)
powerpc: implement vmemmap_free()
vmemmap_free() does the opposite of vmemmap_populate().
This patch also puts vmemmap_free() and vmemmap_list_free() under
CONFIG_MEMORY_HOTPLUG.
Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Acked-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--   arch/powerpc/mm/init_64.c   85
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 69203c8afb51..496379013873 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -298,6 +298,37 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
 	vmemmap_list = vmem_back;
 }
 
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+{
+	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
+
+	/* Align to the page size of the linear mapping. */
+	start = _ALIGN_DOWN(start, page_size);
+
+	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
+
+	for (; start < end; start += page_size) {
+		void *p;
+
+		if (vmemmap_populated(start, page_size))
+			continue;
+
+		p = vmemmap_alloc_block(page_size, node);
+		if (!p)
+			return -ENOMEM;
+
+		vmemmap_list_populate(__pa(p), start, node);
+
+		pr_debug(" * %016lx..%016lx allocated at %p\n",
+			 start, start + page_size, p);
+
+		vmemmap_create_mapping(start, page_size, __pa(p));
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
 static unsigned long vmemmap_list_free(unsigned long start)
 {
 	struct vmemmap_backing *vmem_back, *vmem_back_prev;
@@ -330,40 +361,52 @@ static unsigned long vmemmap_list_free(unsigned long start)
 	return vmem_back->phys;
 }
 
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+void __ref vmemmap_free(unsigned long start, unsigned long end)
 {
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
-	/* Align to the page size of the linear mapping. */
 	start = _ALIGN_DOWN(start, page_size);
 
-	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
+	pr_debug("vmemmap_free %lx...%lx\n", start, end);
 
 	for (; start < end; start += page_size) {
-		void *p;
+		unsigned long addr;
 
+		/*
+		 * the section has already be marked as invalid, so
+		 * vmemmap_populated() true means some other sections still
+		 * in this page, so skip it.
+		 */
 		if (vmemmap_populated(start, page_size))
 			continue;
 
-		p = vmemmap_alloc_block(page_size, node);
-		if (!p)
-			return -ENOMEM;
+		addr = vmemmap_list_free(start);
+		if (addr) {
+			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
 
-		vmemmap_list_populate(__pa(p), start, node);
-
-		pr_debug(" * %016lx..%016lx allocated at %p\n",
-			 start, start + page_size, p);
-
-		vmemmap_create_mapping(start, page_size, __pa(p));
+			if (PageReserved(page)) {
+				/* allocated from bootmem */
+				if (page_size < PAGE_SIZE) {
+					/*
+					 * this shouldn't happen, but if it is
+					 * the case, leave the memory there
+					 */
+					WARN_ON_ONCE(1);
+				} else {
+					unsigned int nr_pages =
+						1 << get_order(page_size);
+					while (nr_pages--)
+						free_reserved_page(page++);
+				}
+			} else
+				free_pages((unsigned long)(__va(addr)),
+							get_order(page_size));
+
+			vmemmap_remove_mapping(start, page_size);
+		}
 	}
-
-	return 0;
-}
-
-void vmemmap_free(unsigned long start, unsigned long end)
-{
 }
-
+#endif
 void register_page_bootmem_memmap(unsigned long section_nr,
 				  struct page *start_page, unsigned long size)
 {
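For reference, a minimal sketch (not part of the commit) of the per-block free decision the new vmemmap_free() makes once vmemmap_list_free() has returned the physical address of a backing block. The wrapper name free_backing_block() is hypothetical; pfn_to_page(), PageReserved(), free_reserved_page(), free_pages(), get_order() and __va() are the kernel interfaces already used in the hunk above, and the defensive page_size < PAGE_SIZE check from the real code is omitted here. Blocks carved out of bootmem are marked PG_reserved and cannot go back through free_pages(), so they are released one constituent page at a time with free_reserved_page(); blocks that came from the buddy allocator are freed as a single higher-order allocation.

/* Illustrative only -- a condensed restatement of the logic added above. */
#include <linux/mm.h>

static void free_backing_block(unsigned long phys, unsigned long page_size)
{
	struct page *page = pfn_to_page(phys >> PAGE_SHIFT);

	if (PageReserved(page)) {
		/* backing block was carved out of bootmem at early boot */
		unsigned int nr_pages = 1 << get_order(page_size);

		while (nr_pages--)
			free_reserved_page(page++);
	} else {
		/* backing block came from the buddy allocator */
		free_pages((unsigned long)__va(phys), get_order(page_size));
	}
}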