author		Yasunori Goto <y-goto@jp.fujitsu.com>	2008-04-28 05:13:34 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 11:58:26 -0400
commit		0c0a4a517a31e05efb38304668198a873bfec6ca
tree		3d02fe9dbf160cd5d328c1e2cf4b40ce37426c5f /mm
parent		86f6dae1377523689bd8468fed2f2dd180fc0560
memory hotplug: free memmaps allocated by bootmem
This patch frees memmaps that were allocated by bootmem.

Freeing the usemap is not necessary: the pages of a usemap may still be needed by other sections. If the section being removed is the last section on its node, that section is the final user of the usemap page (usemaps are allocated on their own section by the previous patch). Even then the usemap must not be freed, because the section has to be in the logically offline state, in which all of its pages are isolated from the page allocator. If the usemap page were freed, the page allocator could hand it out even though it is about to be removed physically, which would be a disaster. So this patch keeps it as it is.

Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
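[Editor's note] The new free_map_bootmem() below relies on bookkeeping set up by the previous patch in this series: every bootmem-allocated memmap page is registered with the section number it belongs to stored in page->private and a type tag (SECTION_INFO or NODE_INFO) stored in page->_mapcount. A simplified sketch of that registration helper, reconstructed here for illustration rather than quoted from the tree:

	static void get_page_bootmem(unsigned long info, struct page *page,
				     int magic)
	{
		/* tag the page: what it describes (magic) and for which
		 * section/node (info) */
		atomic_set(&page->_mapcount, magic);
		SetPagePrivate(page);
		set_page_private(page, info);
		atomic_inc(&page->_count);	/* one reference per registration */
	}

free_map_bootmem() reads the same two fields back: page->private tells it which section a memmap page serves, and BUG_ON(magic == NODE_INFO) asserts that no node-level bookkeeping page ever shows up inside a memmap range.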
Diffstat (limited to 'mm')
 mm/internal.h       |  3 +--
 mm/memory_hotplug.c | 11 +++++++++++
 mm/page_alloc.c     |  2 +-
 mm/sparse.c         | 51 ++++++++++++++++++++++++++++++++++++++++-----
 4 files changed, 60 insertions(+), 7 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 789727309f4d..0034e947e4bc 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -34,8 +34,7 @@ static inline void __put_page(struct page *page)
 	atomic_dec(&page->_count);
 }
 
-extern void __init __free_pages_bootmem(struct page *page,
-						unsigned int order);
+extern void __free_pages_bootmem(struct page *page, unsigned int order);
 
 /*
  * function for dealing with page's order in buddy system.
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index cba36ef0d506..c4ba85c8cb00 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -198,6 +198,16 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 	return register_new_memory(__pfn_to_section(phys_start_pfn));
 }
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+	/*
+	 * XXX: Freeing memmap with vmemmap is not implemented yet.
+	 *      This should be removed later.
+	 */
+	return -EBUSY;
+}
+#else
 static int __remove_section(struct zone *zone, struct mem_section *ms)
 {
 	unsigned long flags;
@@ -216,6 +226,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
 	pgdat_resize_unlock(pgdat, &flags);
 	return 0;
 }
+#endif
 
 /*
  * Reasonably generic function for adding memory. It is
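[Editor's note] The non-VMEMMAP __remove_section() that the new stub shadows is only partially visible in the hunk above. Its full body in this kernel reads roughly as follows (a reconstruction from the surrounding file, not a quotation):

	static int __remove_section(struct zone *zone, struct mem_section *ms)
	{
		unsigned long flags;
		struct pglist_data *pgdat = zone->zone_pgdat;
		int ret = -EINVAL;

		if (!valid_section(ms))
			return ret;

		ret = unregister_memory_section(ms);
		if (ret)
			return ret;

		/* drop the section's mem_map and usemap under the pgdat
		 * resize lock */
		pgdat_resize_lock(pgdat, &flags);
		sparse_remove_one_section(zone, ms);
		pgdat_resize_unlock(pgdat, &flags);
		return 0;
	}

With CONFIG_SPARSEMEM_VMEMMAP the memmap lives in vmemmap-mapped memory that nobody knows how to tear down yet, so the stub fails early with -EBUSY instead of leaking or corrupting it.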
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e0fc3baba843..d3358efdf4e6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -546,7 +546,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __free_pages_bootmem(struct page *page, unsigned int order)
 {
 	if (order == 0) {
 		__ClearPageReserved(page);
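[Editor's note] The only functional change in this file is dropping __init: memory hotplug now returns bootmem-allocated memmap pages to the buddy allocator long after the init sections have been discarded, so the function must survive past boot. The release path introduced earlier in this series ends up here roughly like this (a simplified sketch, not the exact kernel source):

	void put_page_bootmem(struct page *page)
	{
		/* drop the reference taken at registration time; the last
		 * put clears the bootmem bookkeeping and frees the page */
		if (atomic_dec_return(&page->_count) == 1) {
			ClearPagePrivate(page);
			set_page_private(page, 0);
			/* runtime call: hence no __init on the callee */
			__free_pages_bootmem(page, 0);
		}
	}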
diff --git a/mm/sparse.c b/mm/sparse.c
index 08f053218ee8..dff71f173ae9 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include "internal.h"
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -376,6 +377,9 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
 	return; /* XXX: Not implemented yet */
 }
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+}
 #else
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
@@ -413,17 +417,47 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 	free_pages((unsigned long)memmap,
 		   get_order(sizeof(struct page) * nr_pages));
 }
+
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+	unsigned long maps_section_nr, removing_section_nr, i;
+	int magic;
+
+	for (i = 0; i < nr_pages; i++, page++) {
+		magic = atomic_read(&page->_mapcount);
+
+		BUG_ON(magic == NODE_INFO);
+
+		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
+		removing_section_nr = page->private;
+
+		/*
+		 * When this function is called, the section being removed is
+		 * in a logically offline state: all of its pages are isolated
+		 * from the page allocator. If the memmap of the section being
+		 * removed is placed on that same section, it must not be freed
+		 * here; if it were, the page allocator might hand it out even
+		 * though the memory is about to be removed physically.
+		 */
+		if (maps_section_nr != removing_section_nr)
+			put_page_bootmem(page);
+	}
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 {
+	struct page *usemap_page;
+	unsigned long nr_pages;
+
 	if (!usemap)
 		return;
 
+	usemap_page = virt_to_page(usemap);
 	/*
 	 * Check to see if allocation came from hot-plug-add
 	 */
-	if (PageSlab(virt_to_page(usemap))) {
+	if (PageSlab(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
 			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
@@ -431,10 +465,19 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	}
 
 	/*
-	 * TODO: Allocations came from bootmem - how do I free up ?
+	 * The usemap came from bootmem: it is packed with other usemaps
+	 * on the section that holds the pgdat at boot time; keep it as is.
 	 */
-	printk(KERN_WARNING "Not freeing up allocations from bootmem "
-			"- leaking memory\n");
+
+	if (memmap) {
+		struct page *memmap_page;
+		memmap_page = virt_to_page(memmap);
+
+		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+			>> PAGE_SHIFT;
+
+		free_map_bootmem(memmap_page, nr_pages);
+	}
 }
 
 /*
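[Editor's note] As a sanity check on the nr_pages computation above: with an assumed 4 KiB PAGE_SIZE, 128 MiB sections (PAGES_PER_SECTION = 32768) and a 64-byte struct page, one section's memmap occupies 32768 * 64 = 2 MiB, so PAGE_ALIGN(2 MiB) >> PAGE_SHIFT hands 512 pages to free_map_bootmem(). The same arithmetic as standalone C; all constants are assumptions chosen for illustration, not values taken from this kernel:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096;		/* assumed PAGE_SIZE */
		unsigned long pages_per_section = 32768;	/* assumed 128 MiB sections */
		unsigned long struct_page_size = 64;	/* assumed sizeof(struct page) */

		unsigned long bytes = pages_per_section * struct_page_size;
		/* PAGE_ALIGN(x) >> PAGE_SHIFT: round up to whole pages */
		unsigned long nr_pages = (bytes + page_size - 1) / page_size;

		printf("memmap of one section spans %lu pages\n", nr_pages);	/* 512 */
		return 0;
	}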