author     Yinghai Lu <yinghai@kernel.org>        2010-02-10 04:20:21 -0500
committer  H. Peter Anvin <hpa@zytor.com>         2010-02-12 12:42:37 -0500
commit     a4322e1bad91fbca27056fc38d2cbca3f1eae0cf
tree       4d7b12393e1bd06c9c39e384aa6cc0b35e3257a8 /mm/sparse.c
parent     08677214e318297f228237be0042aac754f48f1d
sparsemem: Put usemap for one node together
Allocate the usemaps for all present sections of a node in one block
instead of one allocation per section. This can save some buffer space,
and it helps systems that will use early_res instead of bootmem: each
node then contributes a single early_res entry rather than one per
section, and fewer entries make the search faster on systems with more
memory.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1265793639-15071-18-git-send-email-yinghai@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
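
The idea behind the patch, as a minimal userspace sketch (not part of the
commit): malloc() stands in for alloc_bootmem_node(), the present[] and
nid_of[] tables are toy stand-ins for present_section_nr() and
sparse_early_nid(), and alloc_usemaps_node() is a hypothetical helper.
Each node gets one block that is then carved into per-section usemaps:

#include <stdio.h>
#include <stdlib.h>

#define NR_SECTIONS 8
#define USEMAP_SIZE 16                  /* toy usemap_size() */

static const int present[NR_SECTIONS] = { 1, 1, 0, 1, 1, 0, 1, 1 };
static const int nid_of[NR_SECTIONS]  = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* One allocation per node, parceled out to each present section. */
static void alloc_usemaps_node(void **usemap_map, int begin, int end,
                               int count)
{
        char *usemap = malloc((size_t)USEMAP_SIZE * count);

        if (!usemap)
                return;
        for (int pnum = begin; pnum < end; pnum++) {
                if (!present[pnum])
                        continue;
                usemap_map[pnum] = usemap;
                usemap += USEMAP_SIZE;
        }
}

int main(void)
{
        void *usemap_map[NR_SECTIONS] = { 0 };
        int begin = 0, count = 0;

        /* Scan sections, batching runs that belong to the same node. */
        for (int pnum = 0; pnum < NR_SECTIONS; pnum++) {
                if (!present[pnum])
                        continue;
                if (count && nid_of[pnum] != nid_of[begin]) {
                        alloc_usemaps_node(usemap_map, begin, pnum, count);
                        begin = pnum;
                        count = 0;
                } else if (!count) {
                        begin = pnum;
                }
                count++;
        }
        if (count)      /* last chunk */
                alloc_usemaps_node(usemap_map, begin, NR_SECTIONS, count);

        for (int pnum = 0; pnum < NR_SECTIONS; pnum++)
                printf("section %d -> %p\n", pnum, usemap_map[pnum]);
        return 0;
}

With this toy layout the allocator runs twice (once per node) instead of
six times (once per present section); in the kernel that is what shrinks
the number of early_res entries.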
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--  mm/sparse.c | 84
1 file changed, 66 insertions(+), 18 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 6ce4aab69e99..0cdaf0b58457 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -271,7 +271,8 @@ static unsigned long *__kmalloc_section_usemap(void)
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static unsigned long * __init
-sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+                                         unsigned long count)
 {
         unsigned long section_nr;
 
@@ -286,7 +287,7 @@ sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
          * this problem.
          */
         section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
-        return alloc_bootmem_section(usemap_size(), section_nr);
+        return alloc_bootmem_section(usemap_size() * count, section_nr);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -329,7 +330,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 }
 #else
 static unsigned long * __init
-sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+                                         unsigned long count)
 {
         return NULL;
 }
@@ -339,27 +341,40 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
-static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
+static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
+                                                   unsigned long pnum_begin,
+                                                   unsigned long pnum_end,
+                                                   unsigned long usemap_count, int nodeid)
 {
-        unsigned long *usemap;
-        struct mem_section *ms = __nr_to_section(pnum);
-        int nid = sparse_early_nid(ms);
-
-        usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
-        if (usemap)
-                return usemap;
+        void *usemap;
+        unsigned long pnum;
+        int size = usemap_size();
 
-        usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
+        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
+                                                          usemap_count);
         if (usemap) {
-                check_usemap_section_nr(nid, usemap);
-                return usemap;
+                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                        if (!present_section_nr(pnum))
+                                continue;
+                        usemap_map[pnum] = usemap;
+                        usemap += size;
+                }
+                return;
         }
 
-        /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
-        nid = 0;
+        usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
+        if (usemap) {
+                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                        if (!present_section_nr(pnum))
+                                continue;
+                        usemap_map[pnum] = usemap;
+                        usemap += size;
+                        check_usemap_section_nr(nodeid, usemap_map[pnum]);
+                }
+                return;
+        }
 
         printk(KERN_WARNING "%s: allocation failed\n", __func__);
-        return NULL;
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
@@ -396,6 +411,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
 {
 }
+
 /*
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
@@ -407,6 +423,9 @@ void __init sparse_init(void)
         unsigned long *usemap;
         unsigned long **usemap_map;
         int size;
+        int nodeid_begin = 0;
+        unsigned long pnum_begin = 0;
+        unsigned long usemap_count;
 
         /*
          * map is using big page (aka 2M in x86 64 bit)
@@ -425,10 +444,39 @@ void __init sparse_init(void)
                 panic("can not allocate usemap_map\n");
 
         for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+                struct mem_section *ms;
+
                 if (!present_section_nr(pnum))
                         continue;
-                usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
+                ms = __nr_to_section(pnum);
+                nodeid_begin = sparse_early_nid(ms);
+                pnum_begin = pnum;
+                break;
+        }
+        usemap_count = 1;
+        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+                struct mem_section *ms;
+                int nodeid;
+
+                if (!present_section_nr(pnum))
+                        continue;
+                ms = __nr_to_section(pnum);
+                nodeid = sparse_early_nid(ms);
+                if (nodeid == nodeid_begin) {
+                        usemap_count++;
+                        continue;
+                }
+                /* ok, we need to take care of from pnum_begin to pnum - 1 */
+                sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
+                                                usemap_count, nodeid_begin);
+                /* new start, update count etc. */
+                nodeid_begin = nodeid;
+                pnum_begin = pnum;
+                usemap_count = 1;
         }
+        /* ok, last chunk */
+        sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
+                                        usemap_count, nodeid_begin);
 
         for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                 if (!present_section_nr(pnum))