author    Yinghai Lu <yhlu.kernel@gmail.com>  2008-04-13 14:51:06 -0400
committer Ingo Molnar <mingo@elte.hu>         2008-04-26 16:51:07 -0400
commit    e123dd3f0ec1664576456ea1ea045591a0a95f0c (patch)
tree      b73895fd0750749a1b0d85599ca02da4a920c9c0 /mm
parent    539a5fe22620a1665cce504167953a71a43232ad (diff)
mm: make mem_map allocation continuous
vmemmap allocation currently has this layout:

 [ffffe20000000000-ffffe200001fffff] PMD ->ffff810001400000 on node 0
 [ffffe20000200000-ffffe200003fffff] PMD ->ffff810001800000 on node 0
 [ffffe20000400000-ffffe200005fffff] PMD ->ffff810001c00000 on node 0
 [ffffe20000600000-ffffe200007fffff] PMD ->ffff810002000000 on node 0
 [ffffe20000800000-ffffe200009fffff] PMD ->ffff810002400000 on node 0
 ...

Note the 2M hole between them - not optimal. The root cause is that a
usemap (24 bytes) is allocated after every 2M mem_map, which pushes the
next vmemmap block (2M) to the next 2M alignment boundary.

Solution: try to allocate the mem_map blocks continuously.

After the patch, we get:

 [ffffe20000000000-ffffe200001fffff] PMD ->ffff810001400000 on node 0
 [ffffe20000200000-ffffe200003fffff] PMD ->ffff810001600000 on node 0
 [ffffe20000400000-ffffe200005fffff] PMD ->ffff810001800000 on node 0
 [ffffe20000600000-ffffe200007fffff] PMD ->ffff810001a00000 on node 0
 [ffffe20000800000-ffffe200009fffff] PMD ->ffff810001c00000 on node 0
 ...

which is the ideal layout. The usemaps also share a page, because they
too are allocated continuously:

 sparse_early_usemap_alloc: usemap = ffff810024e00000 size = 24
 sparse_early_usemap_alloc: usemap = ffff810024e00080 size = 24
 sparse_early_usemap_alloc: usemap = ffff810024e00100 size = 24
 sparse_early_usemap_alloc: usemap = ffff810024e00180 size = 24
 ...

So we make the bootmem allocation more compact and use less memory for
the usemaps => mission accomplished ;-)

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
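The slip is easy to reproduce outside the kernel. Below is a minimal
user-space sketch (a toy bump allocator, not kernel code; SZ_2M,
bump_alloc and the 24-byte usemap size are illustrative assumptions)
contrasting the old interleaved ordering with the patch's batched one:

    /* toy model: 2M-aligned mem_map blocks vs. tiny 24-byte usemaps */
    #include <stdio.h>
    #include <stdint.h>

    #define SZ_2M           (2UL << 20)
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    static uint64_t cursor;  /* next free address of the toy bootmem */

    static uint64_t bump_alloc(uint64_t size, uint64_t align)
    {
            uint64_t addr = ALIGN_UP(cursor, align);

            cursor = addr + size;
            return addr;
    }

    int main(void)
    {
            int i;

            printf("interleaved (old): mem_map, usemap, mem_map, ...\n");
            cursor = 0;
            for (i = 0; i < 3; i++) {
                    uint64_t map = bump_alloc(SZ_2M, SZ_2M); /* mem_map */

                    bump_alloc(24, 8);                       /* usemap */
                    printf("  mem_map %d at %#llx\n", i,
                           (unsigned long long)map);
            }

            printf("batched (new): all usemaps, then all mem_maps\n");
            cursor = 0;
            for (i = 0; i < 3; i++)
                    bump_alloc(24, 8);
            for (i = 0; i < 3; i++)
                    printf("  mem_map %d at %#llx\n", i,
                           (unsigned long long)bump_alloc(SZ_2M, SZ_2M));
            return 0;
    }

With interleaving, each 24-byte usemap pushes the next 2M-aligned request
past a 2M boundary (0x0, 0x400000, 0x800000 in the toy run); batched, the
mem_map blocks land back to back (0x200000, 0x400000, 0x600000), matching
the before/after layouts above.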
Diffstat (limited to 'mm')
-rw-r--r--  mm/sparse.c | 32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 98d6b39c3472..458109b99e61 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -304,22 +304,48 @@ void __init sparse_init(void)
 	unsigned long pnum;
 	struct page *map;
 	unsigned long *usemap;
+	unsigned long **usemap_map;
+	int size;
+
+	/*
+	 * map uses a big page (aka 2M on 64-bit x86)
+	 * usemap is far less than one page (aka 24 bytes)
+	 * so allocating 2M (with 2M alignment) and 24 bytes in turn
+	 * will make the next 2M block slip to one more 2M later.
+	 * Then on a big system, the memory will have a lot of holes...
+	 * Here, try to allocate the 2M pages continuously.
+	 *
+	 * powerpc needs to call sparse_init_one_section right after each
+	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
+	 */
+	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
+	usemap_map = alloc_bootmem(size);
+	if (!usemap_map)
+		panic("can not allocate usemap_map\n");
 
 	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
 		if (!present_section_nr(pnum))
 			continue;
+		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
+	}
 
-		map = sparse_early_mem_map_alloc(pnum);
-		if (!map)
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		if (!present_section_nr(pnum))
 			continue;
 
-		usemap = sparse_early_usemap_alloc(pnum);
+		usemap = usemap_map[pnum];
 		if (!usemap)
 			continue;
 
+		map = sparse_early_mem_map_alloc(pnum);
+		if (!map)
+			continue;
+
 		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
 								usemap);
 	}
+
+	free_bootmem(__pa(usemap_map), size);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
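For scale: under the old ordering each present section can lose almost a
full 2M block to the slip, so the "lot of holes" in the comment is roughly
2M times the number of present sections. A back-of-envelope check
(user-space sketch; the 128M x86_64 section size and the 64G machine size
are assumed for illustration, and one 2M vmemmap block plus one 24-byte
usemap per section is taken from the layouts above):

    #include <stdio.h>

    int main(void)
    {
            unsigned long section = 128UL << 20;            /* x86_64 section */
            unsigned long nr      = (64UL << 30) / section; /* 64G machine */
            unsigned long hole    = (2UL << 20) - 24;       /* slip/section */

            /* 512 sections -> ~1023 MB lost to alignment holes */
            printf("%lu sections -> ~%lu MB wasted\n", nr, nr * hole >> 20);
            return 0;
    }

which is why the commit message can claim both a more compact bootmem
layout and less memory used for the usemaps.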