diff options
author | Badari Pulavarty <pbadari@us.ibm.com> | 2008-04-28 05:12:01 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-28 11:58:17 -0400 |
commit | ea01ea937dcae2caa146dea1918cccf2f16ed3c4 (patch) | |
tree | aa3189d587dc04f75bd6a0d79d7f5a764200cd81 /mm | |
parent | 2a4e2b8780c6df42b19c053243dada7fa4d311ee (diff) |
hotplug memory remove: generic __remove_pages() support
Generic helper function to remove section mappings and sysfs entries for the
section of the memory we are removing. offline_pages() correctly adjusts the
zone and marks the pages reserved before this is called.
TODO: Yasunori Goto is working on patches to free up allocations from bootmem.
Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Acked-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory_hotplug.c | 55 | ||||
-rw-r--r-- | mm/sparse.c | 45 |
2 files changed, 97 insertions, 3 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 0fb330271271..d5094929766d 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -101,6 +101,25 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn) | |||
101 | return register_new_memory(__pfn_to_section(phys_start_pfn)); | 101 | return register_new_memory(__pfn_to_section(phys_start_pfn)); |
102 | } | 102 | } |
103 | 103 | ||
104 | static int __remove_section(struct zone *zone, struct mem_section *ms) | ||
105 | { | ||
106 | unsigned long flags; | ||
107 | struct pglist_data *pgdat = zone->zone_pgdat; | ||
108 | int ret = -EINVAL; | ||
109 | |||
110 | if (!valid_section(ms)) | ||
111 | return ret; | ||
112 | |||
113 | ret = unregister_memory_section(ms); | ||
114 | if (ret) | ||
115 | return ret; | ||
116 | |||
117 | pgdat_resize_lock(pgdat, &flags); | ||
118 | sparse_remove_one_section(zone, ms); | ||
119 | pgdat_resize_unlock(pgdat, &flags); | ||
120 | return 0; | ||
121 | } | ||
122 | |||
104 | /* | 123 | /* |
105 | * Reasonably generic function for adding memory. It is | 124 | * Reasonably generic function for adding memory. It is |
106 | * expected that archs that support memory hotplug will | 125 | * expected that archs that support memory hotplug will |
@@ -134,6 +153,42 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn, | |||
134 | } | 153 | } |
135 | EXPORT_SYMBOL_GPL(__add_pages); | 154 | EXPORT_SYMBOL_GPL(__add_pages); |
136 | 155 | ||
156 | /** | ||
157 | * __remove_pages() - remove sections of pages from a zone | ||
158 | * @zone: zone from which pages need to be removed | ||
159 | * @phys_start_pfn: starting pageframe (must be aligned to start of a section) | ||
160 | * @nr_pages: number of pages to remove (must be multiple of section size) | ||
161 | * | ||
162 | * Generic helper function to remove section mappings and sysfs entries | ||
163 | * for the section of the memory we are removing. Caller needs to make | ||
164 | * sure that pages are marked reserved and zones are adjust properly by | ||
165 | * calling offline_pages(). | ||
166 | */ | ||
167 | int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, | ||
168 | unsigned long nr_pages) | ||
169 | { | ||
170 | unsigned long i, ret = 0; | ||
171 | int sections_to_remove; | ||
172 | |||
173 | /* | ||
174 | * We can only remove entire sections | ||
175 | */ | ||
176 | BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK); | ||
177 | BUG_ON(nr_pages % PAGES_PER_SECTION); | ||
178 | |||
179 | release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE); | ||
180 | |||
181 | sections_to_remove = nr_pages / PAGES_PER_SECTION; | ||
182 | for (i = 0; i < sections_to_remove; i++) { | ||
183 | unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION; | ||
184 | ret = __remove_section(zone, __pfn_to_section(pfn)); | ||
185 | if (ret) | ||
186 | break; | ||
187 | } | ||
188 | return ret; | ||
189 | } | ||
190 | EXPORT_SYMBOL_GPL(__remove_pages); | ||
191 | |||
137 | static void grow_zone_span(struct zone *zone, | 192 | static void grow_zone_span(struct zone *zone, |
138 | unsigned long start_pfn, unsigned long end_pfn) | 193 | unsigned long start_pfn, unsigned long end_pfn) |
139 | { | 194 | { |
diff --git a/mm/sparse.c b/mm/sparse.c index 7e9191381f86..186a85bf7912 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -208,12 +208,13 @@ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long p | |||
208 | } | 208 | } |
209 | 209 | ||
210 | /* | 210 | /* |
211 | * We need this if we ever free the mem_maps. While not implemented yet, | 211 | * Decode mem_map from the coded memmap |
212 | * this function is included for parity with its sibling. | ||
213 | */ | 212 | */ |
214 | static __attribute((unused)) | 213 | static |
215 | struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum) | 214 | struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum) |
216 | { | 215 | { |
216 | /* mask off the extra low bits of information */ | ||
217 | coded_mem_map &= SECTION_MAP_MASK; | ||
217 | return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum); | 218 | return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum); |
218 | } | 219 | } |
219 | 220 | ||
@@ -404,6 +405,28 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) | |||
404 | } | 405 | } |
405 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ | 406 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
406 | 407 | ||
408 | static void free_section_usemap(struct page *memmap, unsigned long *usemap) | ||
409 | { | ||
410 | if (!usemap) | ||
411 | return; | ||
412 | |||
413 | /* | ||
414 | * Check to see if allocation came from hot-plug-add | ||
415 | */ | ||
416 | if (PageSlab(virt_to_page(usemap))) { | ||
417 | kfree(usemap); | ||
418 | if (memmap) | ||
419 | __kfree_section_memmap(memmap, PAGES_PER_SECTION); | ||
420 | return; | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * TODO: Allocations came from bootmem - how do I free up ? | ||
425 | */ | ||
426 | printk(KERN_WARNING "Not freeing up allocations from bootmem " | ||
427 | "- leaking memory\n"); | ||
428 | } | ||
429 | |||
407 | /* | 430 | /* |
408 | * returns the number of sections whose mem_maps were properly | 431 | * returns the number of sections whose mem_maps were properly |
409 | * set. If this is <=0, then that means that the passed-in | 432 | * set. If this is <=0, then that means that the passed-in |
@@ -456,4 +479,20 @@ out: | |||
456 | } | 479 | } |
457 | return ret; | 480 | return ret; |
458 | } | 481 | } |
482 | |||
483 | void sparse_remove_one_section(struct zone *zone, struct mem_section *ms) | ||
484 | { | ||
485 | struct page *memmap = NULL; | ||
486 | unsigned long *usemap = NULL; | ||
487 | |||
488 | if (ms->section_mem_map) { | ||
489 | usemap = ms->pageblock_flags; | ||
490 | memmap = sparse_decode_mem_map(ms->section_mem_map, | ||
491 | __section_nr(ms)); | ||
492 | ms->section_mem_map = 0; | ||
493 | ms->pageblock_flags = NULL; | ||
494 | } | ||
495 | |||
496 | free_section_usemap(memmap, usemap); | ||
497 | } | ||
459 | #endif | 498 | #endif |