diff options
author | Yasunori Goto <y-goto@jp.fujitsu.com> | 2007-10-16 04:26:14 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-16 12:43:02 -0400 |
commit | 98f3cfc1dc7a53b629d43b7844a9b3f786213048 (patch) | |
tree | de98ff0e3c25b01863c31abc0f3054de978b9ef8 | |
parent | 48e94196a533dbee17c252bf80d0310fb8c8c2eb (diff) |
memory hotplug: Hot-add with sparsemem-vmemmap
This patch is to avoid panic when memory hot-add is executed with
sparsemem-vmemmap. Current vmemmap-sparsemem code doesn't support memory
hot-add. Vmemmap must be populated on hot-add. This is for
2.6.23-rc2-mm2.
Todo: # Even if this patch is applied, the message "[xxxx-xxxx] potential
offnode page_structs" is displayed. To allocate the memmap on its
own node, the memmap (and pgdat) must themselves already be
initialized — a chicken-and-egg relationship.
# vmemmap_unpopulate will be necessary for the following:
- For cancel hot-add due to error.
- For unplug.
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/mm.h | 2 | ||||
-rw-r--r-- | mm/sparse-vmemmap.c | 2 | ||||
-rw-r--r-- | mm/sparse.c | 25 |
3 files changed, 24 insertions, 5 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h index 292c68623759..cc551f06728b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1147,7 +1147,7 @@ extern int randomize_va_space; | |||
1147 | 1147 | ||
1148 | const char * arch_vma_name(struct vm_area_struct *vma); | 1148 | const char * arch_vma_name(struct vm_area_struct *vma); |
1149 | 1149 | ||
1150 | struct page *sparse_early_mem_map_populate(unsigned long pnum, int nid); | 1150 | struct page *sparse_mem_map_populate(unsigned long pnum, int nid); |
1151 | pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); | 1151 | pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); |
1152 | pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); | 1152 | pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); |
1153 | pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); | 1153 | pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); |
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 4f2d4854f840..d3b718b0c20a 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c | |||
@@ -137,7 +137,7 @@ int __meminit vmemmap_populate_basepages(struct page *start_page, | |||
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
140 | struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid) | 140 | struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid) |
141 | { | 141 | { |
142 | struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION); | 142 | struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION); |
143 | int error = vmemmap_populate(map, PAGES_PER_SECTION, nid); | 143 | int error = vmemmap_populate(map, PAGES_PER_SECTION, nid); |
diff --git a/mm/sparse.c b/mm/sparse.c index 1f4dbb867b8a..08fb14f5eea3 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -259,7 +259,7 @@ static unsigned long *sparse_early_usemap_alloc(unsigned long pnum) | |||
259 | } | 259 | } |
260 | 260 | ||
261 | #ifndef CONFIG_SPARSEMEM_VMEMMAP | 261 | #ifndef CONFIG_SPARSEMEM_VMEMMAP |
262 | struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid) | 262 | struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid) |
263 | { | 263 | { |
264 | struct page *map; | 264 | struct page *map; |
265 | 265 | ||
@@ -284,7 +284,7 @@ struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) | |||
284 | struct mem_section *ms = __nr_to_section(pnum); | 284 | struct mem_section *ms = __nr_to_section(pnum); |
285 | int nid = sparse_early_nid(ms); | 285 | int nid = sparse_early_nid(ms); |
286 | 286 | ||
287 | map = sparse_early_mem_map_populate(pnum, nid); | 287 | map = sparse_mem_map_populate(pnum, nid); |
288 | if (map) | 288 | if (map) |
289 | return map; | 289 | return map; |
290 | 290 | ||
@@ -322,6 +322,18 @@ void __init sparse_init(void) | |||
322 | } | 322 | } |
323 | 323 | ||
324 | #ifdef CONFIG_MEMORY_HOTPLUG | 324 | #ifdef CONFIG_MEMORY_HOTPLUG |
325 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | ||
326 | static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, | ||
327 | unsigned long nr_pages) | ||
328 | { | ||
329 | /* This will make the necessary allocations eventually. */ | ||
330 | return sparse_mem_map_populate(pnum, nid); | ||
331 | } | ||
332 | static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) | ||
333 | { | ||
334 | return; /* XXX: Not implemented yet */ | ||
335 | } | ||
336 | #else | ||
325 | static struct page *__kmalloc_section_memmap(unsigned long nr_pages) | 337 | static struct page *__kmalloc_section_memmap(unsigned long nr_pages) |
326 | { | 338 | { |
327 | struct page *page, *ret; | 339 | struct page *page, *ret; |
@@ -344,6 +356,12 @@ got_map_ptr: | |||
344 | return ret; | 356 | return ret; |
345 | } | 357 | } |
346 | 358 | ||
359 | static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, | ||
360 | unsigned long nr_pages) | ||
361 | { | ||
362 | return __kmalloc_section_memmap(nr_pages); | ||
363 | } | ||
364 | |||
347 | static int vaddr_in_vmalloc_area(void *addr) | 365 | static int vaddr_in_vmalloc_area(void *addr) |
348 | { | 366 | { |
349 | if (addr >= (void *)VMALLOC_START && | 367 | if (addr >= (void *)VMALLOC_START && |
@@ -360,6 +378,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) | |||
360 | free_pages((unsigned long)memmap, | 378 | free_pages((unsigned long)memmap, |
361 | get_order(sizeof(struct page) * nr_pages)); | 379 | get_order(sizeof(struct page) * nr_pages)); |
362 | } | 380 | } |
381 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ | ||
363 | 382 | ||
364 | /* | 383 | /* |
365 | * returns the number of sections whose mem_maps were properly | 384 | * returns the number of sections whose mem_maps were properly |
@@ -382,7 +401,7 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, | |||
382 | * plus, it does a kmalloc | 401 | * plus, it does a kmalloc |
383 | */ | 402 | */ |
384 | sparse_index_init(section_nr, pgdat->node_id); | 403 | sparse_index_init(section_nr, pgdat->node_id); |
385 | memmap = __kmalloc_section_memmap(nr_pages); | 404 | memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages); |
386 | usemap = __kmalloc_section_usemap(); | 405 | usemap = __kmalloc_section_usemap(); |
387 | 406 | ||
388 | pgdat_resize_lock(pgdat, &flags); | 407 | pgdat_resize_lock(pgdat, &flags); |