Diffstat (limited to 'mm/sparse.c')

 mm/sparse.c | 111 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 110 insertions(+), 1 deletion(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 0cdaf0b58457..9b6b93a4d78d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -390,8 +390,65 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 		       PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
 	return map;
 }
+void __init sparse_mem_maps_populate_node(struct page **map_map,
+					  unsigned long pnum_begin,
+					  unsigned long pnum_end,
+					  unsigned long map_count, int nodeid)
+{
+	void *map;
+	unsigned long pnum;
+	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
+
+	map = alloc_remap(nodeid, size * map_count);
+	if (map) {
+		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+			if (!present_section_nr(pnum))
+				continue;
+			map_map[pnum] = map;
+			map += size;
+		}
+		return;
+	}
+
+	size = PAGE_ALIGN(size);
+	map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
+	if (map) {
+		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+			if (!present_section_nr(pnum))
+				continue;
+			map_map[pnum] = map;
+			map += size;
+		}
+		return;
+	}
+
+	/* fallback */
+	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+		struct mem_section *ms;
+
+		if (!present_section_nr(pnum))
+			continue;
+		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+		if (map_map[pnum])
+			continue;
+		ms = __nr_to_section(pnum);
+		printk(KERN_ERR "%s: sparsemem memory map backing failed "
+			"some memory will not be available.\n", __func__);
+		ms->section_mem_map = 0;
+	}
+}
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
+static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+						 unsigned long pnum_begin,
+						 unsigned long pnum_end,
+						 unsigned long map_count, int nodeid)
+{
+	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
+					 map_count, nodeid);
+}
+
+#ifndef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
 	struct page *map;
@@ -407,6 +464,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	ms->section_mem_map = 0;
 	return NULL;
 }
+#endif
 
 void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
 {
@@ -420,12 +478,14 @@ void __init sparse_init(void)
 {
 	unsigned long pnum;
 	struct page *map;
+	struct page **map_map;
 	unsigned long *usemap;
 	unsigned long **usemap_map;
-	int size;
+	int size, size2;
 	int nodeid_begin = 0;
 	unsigned long pnum_begin = 0;
 	unsigned long usemap_count;
+	unsigned long map_count;
 
 	/*
 	 * map is using big page (aka 2M in x86 64 bit)
@@ -478,6 +538,48 @@ void __init sparse_init(void)
 	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
 					 usemap_count, nodeid_begin);
 
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
+	map_map = alloc_bootmem(size2);
+	if (!map_map)
+		panic("can not allocate map_map\n");
+
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		struct mem_section *ms;
+
+		if (!present_section_nr(pnum))
+			continue;
+		ms = __nr_to_section(pnum);
+		nodeid_begin = sparse_early_nid(ms);
+		pnum_begin = pnum;
+		break;
+	}
+	map_count = 1;
+	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+		struct mem_section *ms;
+		int nodeid;
+
+		if (!present_section_nr(pnum))
+			continue;
+		ms = __nr_to_section(pnum);
+		nodeid = sparse_early_nid(ms);
+		if (nodeid == nodeid_begin) {
+			map_count++;
+			continue;
+		}
+		/* ok, we need to take cake of from pnum_begin to pnum - 1*/
+		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
+						 map_count, nodeid_begin);
+		/* new start, update count etc*/
+		nodeid_begin = nodeid;
+		pnum_begin = pnum;
+		map_count = 1;
+	}
+	/* ok, last chunk */
+	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
+					 map_count, nodeid_begin);
+#endif
+
 	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
 		if (!present_section_nr(pnum))
 			continue;
@@ -486,7 +588,11 @@ void __init sparse_init(void)
 		if (!usemap)
 			continue;
 
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+		map = map_map[pnum];
+#else
 		map = sparse_early_mem_map_alloc(pnum);
+#endif
 		if (!map)
 			continue;
 
@@ -496,6 +602,9 @@ void __init sparse_init(void)
 
 	vmemmap_populate_print_last();
 
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+	free_bootmem(__pa(map_map), size2);
+#endif
 	free_bootmem(__pa(usemap_map), size);
 }
 
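For readers skimming the patch: under CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER, the new sparse_init() pass walks all sections, groups consecutive present sections that belong to the same node, and hands each group to sparse_early_mem_maps_alloc_node(), so a node's mem_map arrays come from one contiguous early allocation instead of one allocation per section. Below is a minimal userspace sketch of that grouping pass only; it is not kernel code. The section_nid[] table, present_section(), and populate_node_batch() are made-up stand-ins for the kernel's mem_section data, present_section_nr()/sparse_early_nid(), and sparse_early_mem_maps_alloc_node().

/*
 * Hypothetical sketch of the per-node grouping pass added to sparse_init().
 * Section presence and node IDs are faked with a small array.
 */
#include <stdio.h>

#define NR_SECTIONS 16

/* Fake topology: -1 means "section not present", otherwise the node id. */
static const int section_nid[NR_SECTIONS] = {
	-1, 0, 0, -1, 0, 1, 1, -1, 1, 1, 2, -1, -1, 2, 2, -1
};

static int present_section(unsigned long pnum)
{
	return section_nid[pnum] >= 0;
}

/* Stand-in for the batched per-node allocation the patch introduces. */
static void populate_node_batch(unsigned long pnum_begin,
				unsigned long pnum_end,
				unsigned long count, int nodeid)
{
	printf("node %d: allocate %lu mem_map(s) for sections [%lu, %lu)\n",
	       nodeid, count, pnum_begin, pnum_end);
}

int main(void)
{
	unsigned long pnum, pnum_begin = 0, count;
	int nodeid_begin = -1;

	/* Find the first present section; it opens the first batch. */
	for (pnum = 0; pnum < NR_SECTIONS; pnum++) {
		if (!present_section(pnum))
			continue;
		nodeid_begin = section_nid[pnum];
		pnum_begin = pnum;
		break;
	}
	if (nodeid_begin < 0)
		return 0;	/* no present sections at all */

	count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_SECTIONS; pnum++) {
		int nodeid;

		if (!present_section(pnum))
			continue;
		nodeid = section_nid[pnum];
		if (nodeid == nodeid_begin) {
			count++;
			continue;
		}
		/* Node changed: flush the batch covering [pnum_begin, pnum). */
		populate_node_batch(pnum_begin, pnum, count, nodeid_begin);
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		count = 1;
	}
	/* Flush the final batch. */
	populate_node_batch(pnum_begin, NR_SECTIONS, count, nodeid_begin);

	return 0;
}

Built with any C compiler, the sketch prints one "allocate" line per node batch, mirroring how the patch issues a single batched allocation per node rather than a sparse_mem_map_populate() call per section.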
