author      Badari Pulavarty <pbadari@us.ibm.com>            2008-04-28 05:12:01 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2008-04-28 11:58:17 -0400
commit      ea01ea937dcae2caa146dea1918cccf2f16ed3c4 (patch)
tree        aa3189d587dc04f75bd6a0d79d7f5a764200cd81 /mm/sparse.c
parent      2a4e2b8780c6df42b19c053243dada7fa4d311ee (diff)
hotplug memory remove: generic __remove_pages() support
Generic helper function to remove section mappings and sysfs entries for
the section of the memory we are removing.  offline_pages() correctly
adjusted zone and marked the pages reserved.

TODO: Yasunori Goto is working on patches to free up allocations from
bootmem.

Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Acked-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
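Only the mm/sparse.c half of the change appears on this page; the generic __remove_pages() entry point named in the subject lives in mm/memory_hotplug.c. As orientation, it is expected to walk the removed range one section at a time and hand each mem_section to the new sparse_remove_one_section() shown below. The following is a sketch under that assumption, not the patch text: the __remove_section() helper name, the exact checks, and the omission of locking and of the sysfs unregister call are assumptions here.

/* Sketch only -- illustrative, not part of the mm/sparse.c diff below. */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

static int __remove_section(struct zone *zone, struct mem_section *ms)
{
        if (!valid_section(ms))
                return -EINVAL;

        /* the section's sysfs entry is dropped here as well (helper not shown) */
        sparse_remove_one_section(zone, ms);
        return 0;
}

int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
                   unsigned long nr_pages)
{
        unsigned long pfn;
        int ret = 0;

        /* only whole sections can be removed */
        BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
        BUG_ON(nr_pages % PAGES_PER_SECTION);

        for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages;
             pfn += PAGES_PER_SECTION) {
                ret = __remove_section(zone, __pfn_to_section(pfn));
                if (ret)
                        break;
        }
        return ret;
}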
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--	mm/sparse.c	45
1 file changed, 42 insertions(+), 3 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 7e9191381f86..186a85bf7912 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -208,12 +208,13 @@ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long p
 }
 
 /*
- * We need this if we ever free the mem_maps.  While not implemented yet,
- * this function is included for parity with its sibling.
+ * Decode mem_map from the coded memmap
  */
-static __attribute((unused))
+static
 struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
 {
+        /* mask off the extra low bits of information */
+        coded_mem_map &= SECTION_MAP_MASK;
         return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }
 
@@ -404,6 +405,28 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
+static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+{
+        if (!usemap)
+                return;
+
+        /*
+         * Check to see if allocation came from hot-plug-add
+         */
+        if (PageSlab(virt_to_page(usemap))) {
+                kfree(usemap);
+                if (memmap)
+                        __kfree_section_memmap(memmap, PAGES_PER_SECTION);
+                return;
+        }
+
+        /*
+         * TODO: Allocations came from bootmem - how do I free up ?
+         */
+        printk(KERN_WARNING "Not freeing up allocations from bootmem "
+                        "- leaking memory\n");
+}
+
 /*
  * returns the number of sections whose mem_maps were properly
  * set.  If this is <=0, then that means that the passed-in
@@ -456,4 +479,20 @@ out:
         }
         return ret;
 }
+
+void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
+{
+        struct page *memmap = NULL;
+        unsigned long *usemap = NULL;
+
+        if (ms->section_mem_map) {
+                usemap = ms->pageblock_flags;
+                memmap = sparse_decode_mem_map(ms->section_mem_map,
+                                                __section_nr(ms));
+                ms->section_mem_map = 0;
+                ms->pageblock_flags = NULL;
+        }
+
+        free_section_usemap(memmap, usemap);
+}
 #endif
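A note on the new masking in sparse_decode_mem_map(): the section_mem_map field stores the encoded mem_map pointer with the section state flags (SECTION_MARKED_PRESENT, SECTION_HAS_MEM_MAP) packed into its low bits, and sparse_remove_one_section() now feeds that raw field straight to the decoder, so the flag bits must be stripped before the value can be used as a pointer again. The following is a minimal user-space illustration of that round trip; the flag values are assumed to mirror include/linux/mmzone.h of this era, and the plain-integer arithmetic stands in for the kernel's struct page pointer arithmetic.

#include <stdio.h>
#include <stdint.h>

/* flag layout assumed to mirror include/linux/mmzone.h */
#define SECTION_MARKED_PRESENT  (1UL << 0)
#define SECTION_HAS_MEM_MAP     (1UL << 1)
#define SECTION_MAP_LAST_BIT    (1UL << 2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT - 1))

int main(void)
{
        uintptr_t mem_map = 0x12340000UL;       /* stand-in for a section's mem_map base */
        unsigned long pfn_bias = 0x8000UL;      /* stand-in for section_nr_to_pfn(pnum) */

        /* encode: bias the pointer and pack the state flags into the low bits */
        unsigned long coded = (mem_map - pfn_bias)
                        | SECTION_HAS_MEM_MAP | SECTION_MARKED_PRESENT;

        /* decode without masking: the flag bits leak into the pointer */
        uintptr_t unmasked = coded + pfn_bias;

        /* decode as the patched sparse_decode_mem_map() does */
        uintptr_t masked = (coded & SECTION_MAP_MASK) + pfn_bias;

        printf("unmasked: %#lx\nmasked:   %#lx\n",
               (unsigned long)unmasked, (unsigned long)masked);
        return 0;
}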