aboutsummaryrefslogtreecommitdiffstats
path: root/mm/sparse.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--mm/sparse.c37
1 files changed, 34 insertions, 3 deletions
diff --git a/mm/sparse.c b/mm/sparse.c
index 98d6b39c3472..7e9191381f86 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -295,6 +295,9 @@ struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
295 return NULL; 295 return NULL;
296} 296}
297 297
298void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
299{
300}
298/* 301/*
299 * Allocate the accumulated non-linear sections, allocate a mem_map 302 * Allocate the accumulated non-linear sections, allocate a mem_map
300 * for each and record the physical to section mapping. 303 * for each and record the physical to section mapping.
@@ -304,22 +307,50 @@ void __init sparse_init(void)
304 unsigned long pnum; 307 unsigned long pnum;
305 struct page *map; 308 struct page *map;
306 unsigned long *usemap; 309 unsigned long *usemap;
310 unsigned long **usemap_map;
311 int size;
312
313 /*
314 * map is using big page (aka 2M in x86 64 bit)
315 * usemap is less one page (aka 24 bytes)
316 * so alloc 2M (with 2M align) and 24 bytes in turn will
317 * make next 2M slip to one more 2M later.
318 * then in a big system, the memory will have a lot of holes...
319 * here try to allocate 2M pages contiguously.
320 *
321 * powerpc needs to call sparse_init_one_section right after each
322 * sparse_early_mem_map_alloc, so allocate usemap_map at first.
323 */
324 size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
325 usemap_map = alloc_bootmem(size);
326 if (!usemap_map)
327 panic("can not allocate usemap_map\n");
307 328
308 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) { 329 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
309 if (!present_section_nr(pnum)) 330 if (!present_section_nr(pnum))
310 continue; 331 continue;
332 usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
333 }
311 334
312 map = sparse_early_mem_map_alloc(pnum); 335 for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
313 if (!map) 336 if (!present_section_nr(pnum))
314 continue; 337 continue;
315 338
316 usemap = sparse_early_usemap_alloc(pnum); 339 usemap = usemap_map[pnum];
317 if (!usemap) 340 if (!usemap)
318 continue; 341 continue;
319 342
343 map = sparse_early_mem_map_alloc(pnum);
344 if (!map)
345 continue;
346
320 sparse_init_one_section(__nr_to_section(pnum), pnum, map, 347 sparse_init_one_section(__nr_to_section(pnum), pnum, map,
321 usemap); 348 usemap);
322 } 349 }
350
351 vmemmap_populate_print_last();
352
353 free_bootmem(__pa(usemap_map), size);
323} 354}
324 355
325#ifdef CONFIG_MEMORY_HOTPLUG 356#ifdef CONFIG_MEMORY_HOTPLUG