author     Yasunori Goto <y-goto@jp.fujitsu.com>                2007-10-16 04:26:14 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org> 2007-10-16 12:43:02 -0400
commit     98f3cfc1dc7a53b629d43b7844a9b3f786213048
tree       de98ff0e3c25b01863c31abc0f3054de978b9ef8 /mm/sparse.c
parent     48e94196a533dbee17c252bf80d0310fb8c8c2eb
memory hotplug: Hot-add with sparsemem-vmemmap
This patch avoids a panic when memory hot-add is executed with
sparsemem-vmemmap. The current sparsemem-vmemmap code does not support
memory hot-add: the vmemmap must be populated at hot-add time. This is
for 2.6.23-rc2-mm2.

Todo:
# Even with this patch applied, the message "[xxxx-xxxx] potential
  offnode page_structs" is still displayed. To allocate the memmap on its
  own node, the memmap (and pgdat) would have to initialize itself, a
  chicken-and-egg relationship.
# vmemmap_unpopulate will be necessary for the following:
  - cancelling a hot-add after an error.
  - unplugging memory.
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
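The core of the change is a small dispatch: a kmalloc_section_memmap()
helper is selected by CONFIG_SPARSEMEM_VMEMMAP, so the hot-add path
populates the vmemmap instead of calling __kmalloc_section_memmap()
directly. A condensed sketch of that shape, simplified from the diff
below and not buildable on its own:

```c
/*
 * Sketch only, condensed from the diff below.
 * With CONFIG_SPARSEMEM_VMEMMAP the hot-added section's memmap comes from
 * the vmemmap populator; without it, the old page-allocator path is kept.
 */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* Populate the vmemmap for section 'pnum' on node 'nid'. */
	return sparse_mem_map_populate(pnum, nid);
}
#else
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* Classic sparsemem: allocate the memmap pages directly. */
	return __kmalloc_section_memmap(nr_pages);
}
#endif
```

sparse_add_one_section() then calls
kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages) rather than
__kmalloc_section_memmap(nr_pages), which is why the section number and
node id are now threaded through to the allocator.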
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--   mm/sparse.c | 25
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 1f4dbb867b8a..08fb14f5eea3 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -259,7 +259,7 @@ static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
+struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 {
 	struct page *map;
 
@@ -284,7 +284,7 @@ struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	struct mem_section *ms = __nr_to_section(pnum);
 	int nid = sparse_early_nid(ms);
 
-	map = sparse_early_mem_map_populate(pnum, nid);
+	map = sparse_mem_map_populate(pnum, nid);
 	if (map)
 		return map;
 
@@ -322,6 +322,18 @@ void __init sparse_init(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+						unsigned long nr_pages)
+{
+	/* This will make the necessary allocations eventually. */
+	return sparse_mem_map_populate(pnum, nid);
+}
+static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+{
+	return; /* XXX: Not implemented yet */
+}
+#else
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
 	struct page *page, *ret;
@@ -344,6 +356,12 @@ got_map_ptr:
 	return ret;
 }
 
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+						unsigned long nr_pages)
+{
+	return __kmalloc_section_memmap(nr_pages);
+}
+
 static int vaddr_in_vmalloc_area(void *addr)
 {
 	if (addr >= (void *)VMALLOC_START &&
@@ -360,6 +378,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 	free_pages((unsigned long)memmap,
 		   get_order(sizeof(struct page) * nr_pages));
 }
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 /*
  * returns the number of sections whose mem_maps were properly
@@ -382,7 +401,7 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	 * plus, it does a kmalloc
 	 */
 	sparse_index_init(section_nr, pgdat->node_id);
-	memmap = __kmalloc_section_memmap(nr_pages);
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
 	usemap = __kmalloc_section_usemap();
 
 	pgdat_resize_lock(pgdat, &flags);