author    Badari Pulavarty <pbadari@us.ibm.com>	2008-04-28 05:12:01 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 11:58:17 -0400
commit    ea01ea937dcae2caa146dea1918cccf2f16ed3c4 (patch)
tree      aa3189d587dc04f75bd6a0d79d7f5a764200cd81
parent    2a4e2b8780c6df42b19c053243dada7fa4d311ee (diff)
hotplug memory remove: generic __remove_pages() support
Generic helper function to remove section mappings and sysfs entries for
the section of the memory we are removing. offline_pages() correctly
adjusted zone and marked the pages reserved.

TODO: Yasunori Goto is working on patches to free up allocations from bootmem.

Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Acked-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
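The intended calling order is offline_pages() first (isolate the range, adjust the zone, mark the pages reserved), then __remove_pages() to tear down the section mappings and sysfs entries. A minimal sketch of a hot-remove path built on the new helper might look like the following; example_remove_range() and the 120 * HZ timeout are illustrative assumptions, not part of this patch.

#include <linux/memory_hotplug.h>
#include <linux/mmzone.h>

/* Illustrative caller only, not part of this patch. */
static int example_remove_range(struct zone *zone, unsigned long start_pfn,
				unsigned long nr_pages)
{
	int ret;

	/* Take the range offline: adjusts the zone and marks pages reserved. */
	ret = offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
	if (ret)
		return ret;

	/* Then drop the section mappings and sysfs entries (this patch). */
	return __remove_pages(zone, start_pfn, nr_pages);
}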
-rw-r--r--  include/linux/memory_hotplug.h |  6
-rw-r--r--  mm/memory_hotplug.c            | 55
-rw-r--r--  mm/sparse.c                    | 45
3 files changed, 102 insertions(+), 4 deletions(-)
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 8fee7a45736b..aca9c65f8d08 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -8,6 +8,7 @@
 struct page;
 struct zone;
 struct pglist_data;
+struct mem_section;
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
@@ -64,9 +65,11 @@ extern int offline_pages(unsigned long, unsigned long, unsigned long);
 /* reasonably generic interface to expand the physical pages in a zone */
 extern int __add_pages(struct zone *zone, unsigned long start_pfn,
 	unsigned long nr_pages);
+extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
+	unsigned long nr_pages);
 
 /*
- * Walk thorugh all memory which is registered as resource.
+ * Walk through all memory which is registered as resource.
  * arg is (start_pfn, nr_pages, private_arg_pointer)
  */
 extern int walk_memory_resource(unsigned long start_pfn,
@@ -176,5 +179,6 @@ extern int arch_add_memory(int nid, u64 start, u64 size);
 extern int remove_memory(u64 start, u64 size);
 extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	int nr_pages);
+extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
 
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0fb330271271..d5094929766d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -101,6 +101,25 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 	return register_new_memory(__pfn_to_section(phys_start_pfn));
 }
 
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+	unsigned long flags;
+	struct pglist_data *pgdat = zone->zone_pgdat;
+	int ret = -EINVAL;
+
+	if (!valid_section(ms))
+		return ret;
+
+	ret = unregister_memory_section(ms);
+	if (ret)
+		return ret;
+
+	pgdat_resize_lock(pgdat, &flags);
+	sparse_remove_one_section(zone, ms);
+	pgdat_resize_unlock(pgdat, &flags);
+	return 0;
+}
+
 /*
  * Reasonably generic function for adding memory. It is
  * expected that archs that support memory hotplug will
@@ -134,6 +153,42 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 }
 EXPORT_SYMBOL_GPL(__add_pages);
 
+/**
+ * __remove_pages() - remove sections of pages from a zone
+ * @zone: zone from which pages need to be removed
+ * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
+ * @nr_pages: number of pages to remove (must be multiple of section size)
+ *
+ * Generic helper function to remove section mappings and sysfs entries
+ * for the section of the memory we are removing. Caller needs to make
+ * sure that pages are marked reserved and zones are adjusted properly by
+ * calling offline_pages().
+ */
+int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
+		 unsigned long nr_pages)
+{
+	unsigned long i, ret = 0;
+	int sections_to_remove;
+
+	/*
+	 * We can only remove entire sections
+	 */
+	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
+	BUG_ON(nr_pages % PAGES_PER_SECTION);
+
+	release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);
+
+	sections_to_remove = nr_pages / PAGES_PER_SECTION;
+	for (i = 0; i < sections_to_remove; i++) {
+		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+		ret = __remove_section(zone, __pfn_to_section(pfn));
+		if (ret)
+			break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__remove_pages);
+
 static void grow_zone_span(struct zone *zone,
 		unsigned long start_pfn, unsigned long end_pfn)
 {
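__remove_pages() only operates on whole memory sections, as the two BUG_ON() checks above enforce. A short sketch of how a caller might validate a physical range against those constraints before handing it over; example_check_section_aligned() is hypothetical, while PAGE_SHIFT, PAGE_SECTION_MASK and PAGES_PER_SECTION are the existing macros from the headers.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mmzone.h>

/* Hypothetical helper: reject ranges __remove_pages() would BUG() on. */
static int example_check_section_aligned(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* the start must sit on a section boundary ... */
	if (start_pfn & ~PAGE_SECTION_MASK)
		return -EINVAL;
	/* ... and the length must be a whole number of sections */
	if (nr_pages % PAGES_PER_SECTION)
		return -EINVAL;

	return 0;
}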
diff --git a/mm/sparse.c b/mm/sparse.c
index 7e9191381f86..186a85bf7912 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -208,12 +208,13 @@ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long p
 }
 
 /*
- * We need this if we ever free the mem_maps. While not implemented yet,
- * this function is included for parity with its sibling.
+ * Decode mem_map from the coded memmap
  */
-static __attribute((unused))
+static
 struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
 {
+	/* mask off the extra low bits of information */
+	coded_mem_map &= SECTION_MAP_MASK;
 	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }
 
@@ -404,6 +405,28 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
+static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+{
+	if (!usemap)
+		return;
+
+	/*
+	 * Check to see if allocation came from hot-plug-add
+	 */
+	if (PageSlab(virt_to_page(usemap))) {
+		kfree(usemap);
+		if (memmap)
+			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
+		return;
+	}
+
+	/*
+	 * TODO: Allocations came from bootmem - how do I free up ?
+	 */
+	printk(KERN_WARNING "Not freeing up allocations from bootmem "
+			"- leaking memory\n");
+}
+
 /*
  * returns the number of sections whose mem_maps were properly
  * set. If this is <=0, then that means that the passed-in
@@ -456,4 +479,20 @@ out:
 	}
 	return ret;
 }
+
+void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
+{
+	struct page *memmap = NULL;
+	unsigned long *usemap = NULL;
+
+	if (ms->section_mem_map) {
+		usemap = ms->pageblock_flags;
+		memmap = sparse_decode_mem_map(ms->section_mem_map,
+						__section_nr(ms));
+		ms->section_mem_map = 0;
+		ms->pageblock_flags = NULL;
+	}
+
+	free_section_usemap(memmap, usemap);
+}
 #endif
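sparse_remove_one_section() above recovers the memmap pointer from the encoded section_mem_map before handing it to free_section_usemap(): the stored value is the mem_map pointer biased by the section's first pfn, with section state flags packed into the low bits, which SECTION_MAP_MASK strips off again (the new lines in sparse_decode_mem_map() above). A rough round-trip sketch of that encoding follows; example_round_trip() is illustrative and the exact flag handling here is an assumption, not the kernel's code.

#include <linux/mmzone.h>

/* Illustrative sketch of the mem_map encoding used by mm/sparse.c. */
static struct page *example_round_trip(struct page *memmap, unsigned long pnum)
{
	unsigned long coded;

	/* encode: bias by the section's first pfn, then pack flag bits in */
	coded  = (unsigned long)(memmap - section_nr_to_pfn(pnum));
	coded |= SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;

	/* decode: mask off the flag bits, re-add the pfn bias */
	coded &= SECTION_MAP_MASK;
	return (struct page *)coded + section_nr_to_pfn(pnum);	/* == memmap */
}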