author     Linus Torvalds <torvalds@linux-foundation.org>  2010-03-03 11:15:05 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-03-03 11:15:05 -0500
commit     a626b46e17d0762d664ce471d40bc506b6e721ab (patch)
tree       445f6ac655ea9247d2e27529f23ba02d0991fec0 /mm
parent     c1dcb4bb1e3e16e9baee578d9bb040e5fba1063e (diff)
parent     dce46a04d55d6358d2d4ab44a4946a19f9425fe2 (diff)
Merge branch 'x86-bootmem-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-bootmem-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (30 commits)
early_res: Need to save the allocation name in drop_range_partial()
sparsemem: Fix compilation on PowerPC
early_res: Add free_early_partial()
x86: Fix non-bootmem compilation on PowerPC
core: Move early_res from arch/x86 to kernel/
x86: Add find_fw_memmap_area
Move round_up/down to kernel.h
x86: Make 32bit support NO_BOOTMEM
early_res: Enhance check_and_double_early_res
x86: Move back find_e820_area to e820.c
x86: Add find_early_area_size
x86: Separate early_res related code from e820.c
x86: Move bios page reserve early to head32/64.c
sparsemem: Put mem map for one node together.
sparsemem: Put usemap for one node together
x86: Make 64 bit use early_res instead of bootmem before slab
x86: Only call dma32_reserve_bootmem 64bit !CONFIG_NUMA
x86: Make early_node_mem get mem > 4 GB if possible
x86: Dynamically increase early_res array size
x86: Introduce max_early_res and early_res_count
...
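
The common thread in this branch is replacing the bootmem bitmap allocator with the early_res range tracker during early boot: early_res moves from arch/x86 to kernel/, CONFIG_NO_BOOTMEM lets the kernel hand early_res-managed ranges straight to the buddy allocator, and sparsemem learns to allocate each node's mem_map and usemap in one contiguous chunk. As a rough mental model, here is a minimal sketch of a range-based early allocator; the structure, array size, and helper bodies below are illustrative assumptions, not the kernel's actual early_res code:

/*
 * Toy early_res: allocations are tracked as reserved [start, end)
 * ranges; anything never reserved can later be handed to the buddy
 * allocator wholesale.
 */
#include <stdio.h>
#include <string.h>

struct early_range {
        unsigned long long start, end;  /* reserved [start, end) */
        char name[16];
};

static struct early_range early_res[32];
static int early_res_count;

/* record an allocation; the real code also checks overlap and grows */
static void reserve_early(unsigned long long start, unsigned long long end,
                          const char *name)
{
        struct early_range *r = &early_res[early_res_count++];

        r->start = start;
        r->end = end;
        strncpy(r->name, name, sizeof(r->name) - 1);
}

/* first-fit search for a free, aligned block inside [ei_start, ei_last) */
static unsigned long long find_early_area(unsigned long long ei_start,
                                          unsigned long long ei_last,
                                          unsigned long long size,
                                          unsigned long long align)
{
        unsigned long long addr = (ei_start + align - 1) & ~(align - 1);
        int i;

again:
        for (i = 0; i < early_res_count; i++) {
                struct early_range *r = &early_res[i];

                if (addr < r->end && addr + size > r->start) {
                        /* collides with a reservation: bump past it */
                        addr = (r->end + align - 1) & ~(align - 1);
                        goto again;
                }
        }
        return addr + size <= ei_last ? addr : -1ULL;
}

int main(void)
{
        reserve_early(0x0, 0x100000, "kernel");
        /* prints 0x100000: first free aligned spot after the kernel */
        printf("alloc at %#llx\n",
               find_early_area(0x0, 0x1000000, 0x4000, 0x1000));
        return 0;
}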
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           |   4
-rw-r--r--  mm/bootmem.c         | 195
-rw-r--r--  mm/page_alloc.c      |  61
-rw-r--r--  mm/sparse-vmemmap.c  |  76
-rw-r--r--  mm/sparse.c          | 196
5 files changed, 508 insertions(+), 24 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index d34c2b971032..9c61158308dc 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -115,6 +115,10 @@ config SPARSEMEM_EXTREME
 config SPARSEMEM_VMEMMAP_ENABLE
         bool
 
+config SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+        def_bool y
+        depends on SPARSEMEM && X86_64
+
 config SPARSEMEM_VMEMMAP
         bool "Sparse Memory virtual memmap"
         depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 7d1486875e1c..d7c791ef0036 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -13,6 +13,7 @@
 #include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/kmemleak.h>
+#include <linux/range.h>
 
 #include <asm/bug.h>
 #include <asm/io.h>
@@ -32,6 +33,7 @@ unsigned long max_pfn;
 unsigned long saved_max_pfn;
 #endif
 
+#ifndef CONFIG_NO_BOOTMEM
 bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
 
 static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
@@ -142,7 +144,7 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
         min_low_pfn = start;
         return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
 }
-
+#endif
 /*
  * free_bootmem_late - free bootmem pages directly to page allocator
  * @addr: starting address of the range
@@ -167,6 +169,60 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
         }
 }
 
+#ifdef CONFIG_NO_BOOTMEM
+static void __init __free_pages_memory(unsigned long start, unsigned long end)
+{
+        int i;
+        unsigned long start_aligned, end_aligned;
+        int order = ilog2(BITS_PER_LONG);
+
+        start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
+        end_aligned = end & ~(BITS_PER_LONG - 1);
+
+        if (end_aligned <= start_aligned) {
+#if 1
+                printk(KERN_DEBUG " %lx - %lx\n", start, end);
+#endif
+                for (i = start; i < end; i++)
+                        __free_pages_bootmem(pfn_to_page(i), 0);
+
+                return;
+        }
+
+#if 1
+        printk(KERN_DEBUG " %lx %lx - %lx %lx\n",
+                start, start_aligned, end_aligned, end);
+#endif
+        for (i = start; i < start_aligned; i++)
+                __free_pages_bootmem(pfn_to_page(i), 0);
+
+        for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
+                __free_pages_bootmem(pfn_to_page(i), order);
+
+        for (i = end_aligned; i < end; i++)
+                __free_pages_bootmem(pfn_to_page(i), 0);
+}
+
+unsigned long __init free_all_memory_core_early(int nodeid)
+{
+        int i;
+        u64 start, end;
+        unsigned long count = 0;
+        struct range *range = NULL;
+        int nr_range;
+
+        nr_range = get_free_all_memory_range(&range, nodeid);
+
+        for (i = 0; i < nr_range; i++) {
+                start = range[i].start;
+                end = range[i].end;
+                count += end - start;
+                __free_pages_memory(start, end);
+        }
+
+        return count;
+}
+#else
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
         int aligned;
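
The batched free above is the performance-sensitive piece: interior pages are handed to the buddy allocator in order-ilog2(BITS_PER_LONG) blocks (64 pages per call on 64-bit) while the unaligned edges go one page at a time. The masking arithmetic in isolation, as a standalone sketch rather than kernel code:

#include <stdio.h>

#define BITS_PER_LONG 64        /* assumption: a 64-bit build */

/* split [start, end) into head singles, aligned middle, tail singles */
static void split_range(unsigned long start, unsigned long end)
{
        unsigned long start_aligned =
                (start + BITS_PER_LONG - 1) & ~(unsigned long)(BITS_PER_LONG - 1);
        unsigned long end_aligned = end & ~(unsigned long)(BITS_PER_LONG - 1);

        if (end_aligned <= start_aligned) {
                printf("all singles:   [%lu, %lu)\n", start, end);
                return;
        }
        printf("head singles:  [%lu, %lu)\n", start, start_aligned);
        printf("middle blocks: [%lu, %lu), %d pages per free\n",
               start_aligned, end_aligned, BITS_PER_LONG);
        printf("tail singles:  [%lu, %lu)\n", end_aligned, end);
}

int main(void)
{
        /* head [100, 128), middle [128, 960), tail [960, 1000) */
        split_range(100, 1000);
        return 0;
}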
@@ -227,6 +283,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
         return count;
 }
+#endif
 
 /**
  * free_all_bootmem_node - release a node's free pages to the buddy allocator
@@ -237,7 +294,12 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
         register_page_bootmem_info_node(pgdat);
+#ifdef CONFIG_NO_BOOTMEM
+        /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+        return 0;
+#else
         return free_all_bootmem_core(pgdat->bdata);
+#endif
 }
 
 /**
@@ -247,9 +309,14 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
  */
 unsigned long __init free_all_bootmem(void)
 {
+#ifdef CONFIG_NO_BOOTMEM
+        return free_all_memory_core_early(NODE_DATA(0)->node_id);
+#else
         return free_all_bootmem_core(NODE_DATA(0)->bdata);
+#endif
 }
 
+#ifndef CONFIG_NO_BOOTMEM
 static void __init __free(bootmem_data_t *bdata,
                         unsigned long sidx, unsigned long eidx)
 {
@@ -344,6 +411,7 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
         }
         BUG();
 }
+#endif
 
 /**
  * free_bootmem_node - mark a page range as usable
@@ -358,6 +426,12 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
 void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                               unsigned long size)
 {
+#ifdef CONFIG_NO_BOOTMEM
+        free_early(physaddr, physaddr + size);
+#if 0
+        printk(KERN_DEBUG "free %lx %lx\n", physaddr, size);
+#endif
+#else
         unsigned long start, end;
 
         kmemleak_free_part(__va(physaddr), size);
@@ -366,6 +440,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
         end = PFN_DOWN(physaddr + size);
 
         mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
+#endif
 }
 
 /**
@@ -379,6 +454,12 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
  */
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
+#ifdef CONFIG_NO_BOOTMEM
+        free_early(addr, addr + size);
+#if 0
+        printk(KERN_DEBUG "free %lx %lx\n", addr, size);
+#endif
+#else
         unsigned long start, end;
 
         kmemleak_free_part(__va(addr), size);
@@ -387,6 +468,7 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
         end = PFN_DOWN(addr + size);
 
         mark_bootmem(start, end, 0, 0);
+#endif
 }
 
 /**
@@ -403,12 +485,17 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
 int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                  unsigned long size, int flags)
 {
+#ifdef CONFIG_NO_BOOTMEM
+        panic("no bootmem");
+        return 0;
+#else
         unsigned long start, end;
 
         start = PFN_DOWN(physaddr);
         end = PFN_UP(physaddr + size);
 
         return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
+#endif
 }
 
 /**
@@ -424,14 +511,20 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 int __init reserve_bootmem(unsigned long addr, unsigned long size,
                             int flags)
 {
+#ifdef CONFIG_NO_BOOTMEM
+        panic("no bootmem");
+        return 0;
+#else
         unsigned long start, end;
 
         start = PFN_DOWN(addr);
         end = PFN_UP(addr + size);
 
         return mark_bootmem(start, end, 1, flags);
+#endif
 }
 
+#ifndef CONFIG_NO_BOOTMEM
 static unsigned long __init align_idx(struct bootmem_data *bdata,
                                       unsigned long idx, unsigned long step)
 {
@@ -582,12 +675,33 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
 #endif
         return NULL;
 }
+#endif
 
 static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                         unsigned long align,
                                         unsigned long goal,
                                         unsigned long limit)
 {
+#ifdef CONFIG_NO_BOOTMEM
+        void *ptr;
+
+        if (WARN_ON_ONCE(slab_is_available()))
+                return kzalloc(size, GFP_NOWAIT);
+
+restart:
+
+        ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
+
+        if (ptr)
+                return ptr;
+
+        if (goal != 0) {
+                goal = 0;
+                goto restart;
+        }
+
+        return NULL;
+#else
         bootmem_data_t *bdata;
         void *region;
 
@@ -613,6 +727,7 @@ restart:
         }
 
         return NULL;
+#endif
 }
 
 /**
@@ -631,7 +746,13 @@ restart:
 void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                         unsigned long goal)
 {
-        return ___alloc_bootmem_nopanic(size, align, goal, 0);
+        unsigned long limit = 0;
+
+#ifdef CONFIG_NO_BOOTMEM
+        limit = -1UL;
+#endif
+
+        return ___alloc_bootmem_nopanic(size, align, goal, limit);
 }
 
 static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
@@ -665,9 +786,16 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
 void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                               unsigned long goal)
 {
-        return ___alloc_bootmem(size, align, goal, 0);
+        unsigned long limit = 0;
+
+#ifdef CONFIG_NO_BOOTMEM
+        limit = -1UL;
+#endif
+
+        return ___alloc_bootmem(size, align, goal, limit);
 }
 
+#ifndef CONFIG_NO_BOOTMEM
 static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
                                 unsigned long size, unsigned long align,
                                 unsigned long goal, unsigned long limit)
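
Both wrappers above now compute an explicit limit because the two back ends disagree on the sentinel: bootmem's core treats limit == 0 as "unbounded", while __alloc_memory_core_early() clamps against limit as a real address, so the NO_BOOTMEM build passes -1UL (the highest representable address) to mean the same thing. A toy illustration of why -1UL behaves as "no limit" under min()-style clamping (sketch only):

#include <stdio.h>

/* clip a range end by an upper bound; -1ULL never clips anything */
static unsigned long long clip_end(unsigned long long range_end,
                                   unsigned long long limit)
{
        return range_end < limit ? range_end : limit;
}

int main(void)
{
        printf("%#llx\n", clip_end(0x40000000ULL, -1ULL));        /* 0x40000000 */
        printf("%#llx\n", clip_end(0x40000000ULL, 0x1000000ULL)); /* 0x1000000 */
        return 0;
}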
@@ -684,6 +812,7 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 
         return ___alloc_bootmem(size, align, goal, limit);
 }
+#endif
 
 /**
  * __alloc_bootmem_node - allocate boot memory from a specific node
@@ -706,7 +835,46 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
         if (WARN_ON_ONCE(slab_is_available()))
                 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
+#ifdef CONFIG_NO_BOOTMEM
+        return __alloc_memory_core_early(pgdat->node_id, size, align,
+                                         goal, -1ULL);
+#else
         return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+#endif
+}
+
+void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+                                        unsigned long align, unsigned long goal)
+{
+#ifdef MAX_DMA32_PFN
+        unsigned long end_pfn;
+
+        if (WARN_ON_ONCE(slab_is_available()))
+                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+        /* update goal according ...MAX_DMA32_PFN */
+        end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+
+        if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
+            (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
+                void *ptr;
+                unsigned long new_goal;
+
+                new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
+#ifdef CONFIG_NO_BOOTMEM
+                ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+                                                new_goal, -1ULL);
+#else
+                ptr = alloc_bootmem_core(pgdat->bdata, size, align,
+                                         new_goal, 0);
+#endif
+                if (ptr)
+                        return ptr;
+        }
+#endif
+
+        return __alloc_bootmem_node(pgdat, size, align, goal);
+
 }
 
 #ifdef CONFIG_SPARSEMEM
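
The new __alloc_bootmem_node_high() exists so that large early allocations (the vmemmap buffers below are the main customer) prefer memory above 4 GB when the node has any, leaving the scarce DMA32 range for callers that actually need it: if the node ends beyond MAX_DMA32_PFN and the caller's goal sits below it, the allocation is retried with the goal raised to the 4 GB mark first. The decision in isolation, with PAGE_SHIFT and MAX_DMA32_PFN set to their usual x86-64 values as an assumption (the 128 >> (20 - PAGE_SHIFT) slack term is dropped here for simplicity):

#include <stdio.h>

#define PAGE_SHIFT      12                              /* 4 KB pages */
#define MAX_DMA32_PFN   (1UL << (32 - PAGE_SHIFT))      /* pfn of the 4 GB mark */

/* pick the goal: bump it above 4 GB when the node reaches past 4 GB */
static unsigned long pick_goal(unsigned long node_end_pfn, unsigned long goal)
{
        if (node_end_pfn > MAX_DMA32_PFN &&
            (goal >> PAGE_SHIFT) < MAX_DMA32_PFN)
                return MAX_DMA32_PFN << PAGE_SHIFT;     /* start at 4 GB */
        return goal;
}

int main(void)
{
        /* node spans 6 GB, goal was 16 MB: bumped to 0x100000000 */
        printf("%#lx\n", pick_goal(6UL << (30 - PAGE_SHIFT), 16UL << 20));
        return 0;
}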
@@ -720,6 +888,16 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 void * __init alloc_bootmem_section(unsigned long size,
                                     unsigned long section_nr)
 {
+#ifdef CONFIG_NO_BOOTMEM
+        unsigned long pfn, goal, limit;
+
+        pfn = section_nr_to_pfn(section_nr);
+        goal = pfn << PAGE_SHIFT;
+        limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+
+        return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
+                                         SMP_CACHE_BYTES, goal, limit);
+#else
         bootmem_data_t *bdata;
         unsigned long pfn, goal, limit;
 
@@ -729,6 +907,7 @@ void * __init alloc_bootmem_section(unsigned long size,
         bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
 
         return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
+#endif
 }
 #endif
 
@@ -740,11 +919,16 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
         if (WARN_ON_ONCE(slab_is_available()))
                 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
+#ifdef CONFIG_NO_BOOTMEM
+        ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
+                                        goal, -1ULL);
+#else
         ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
         if (ptr)
                 return ptr;
 
         ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
+#endif
         if (ptr)
                 return ptr;
 
@@ -795,6 +979,11 @@ void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
         if (WARN_ON_ONCE(slab_is_available()))
                 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
+#ifdef CONFIG_NO_BOOTMEM
+        return __alloc_memory_core_early(pgdat->node_id, size, align,
+                                goal, ARCH_LOW_ADDRESS_LIMIT);
+#else
         return ___alloc_bootmem_node(pgdat->bdata, size, align,
                                      goal, ARCH_LOW_ADDRESS_LIMIT);
+#endif
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9a7aaae07ab4..a6b17aa4740b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3374,6 +3374,61 @@ void __init free_bootmem_with_active_regions(int nid,
         }
 }
 
+int __init add_from_early_node_map(struct range *range, int az,
+                                   int nr_range, int nid)
+{
+        int i;
+        u64 start, end;
+
+        /* need to go over early_node_map to find out good range for node */
+        for_each_active_range_index_in_nid(i, nid) {
+                start = early_node_map[i].start_pfn;
+                end = early_node_map[i].end_pfn;
+                nr_range = add_range(range, az, nr_range, start, end);
+        }
+        return nr_range;
+}
+
+#ifdef CONFIG_NO_BOOTMEM
+void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+                                        u64 goal, u64 limit)
+{
+        int i;
+        void *ptr;
+
+        /* need to go over early_node_map to find out good range for node */
+        for_each_active_range_index_in_nid(i, nid) {
+                u64 addr;
+                u64 ei_start, ei_last;
+
+                ei_last = early_node_map[i].end_pfn;
+                ei_last <<= PAGE_SHIFT;
+                ei_start = early_node_map[i].start_pfn;
+                ei_start <<= PAGE_SHIFT;
+                addr = find_early_area(ei_start, ei_last,
+                                       goal, limit, size, align);
+
+                if (addr == -1ULL)
+                        continue;
+
+#if 0
+                printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
+                        nid,
+                        ei_start, ei_last, goal, limit, size,
+                        align, addr);
+#endif
+
+                ptr = phys_to_virt(addr);
+                memset(ptr, 0, size);
+                reserve_early_without_check(addr, addr + size, "BOOTMEM");
+                return ptr;
+        }
+
+        return NULL;
+}
+#endif
+
+
 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
 {
         int i;
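
__alloc_memory_core_early() is the NO_BOOTMEM back end: it walks the node's early_node_map entries, asks find_early_area() for a fit inside each range subject to goal and limit, then zeroes the winning block and records it through reserve_early_without_check(). The fit test reduces to interval clamping plus alignment; a compact sketch of that one step (an assumed simplification, not find_early_area()'s real body):

#include <stdio.h>

/* clamp [ei_start, ei_last) by goal and limit, then bottom-place an
 * aligned block of the given size; -1 means no fit in this range */
static long long place(unsigned long long ei_start, unsigned long long ei_last,
                       unsigned long long goal, unsigned long long limit,
                       unsigned long long size, unsigned long long align)
{
        unsigned long long start = ei_start > goal ? ei_start : goal;
        unsigned long long end = ei_last < limit ? ei_last : limit;

        start = (start + align - 1) & ~(align - 1);
        return start + size <= end ? (long long)start : -1;
}

int main(void)
{
        /* range 1-2 MB, goal 1.5 MB, no limit: lands at 0x180000 */
        printf("%lld\n", place(1 << 20, 2 << 20, 3 << 19, -1ULL, 4096, 4096));
        return 0;
}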
@@ -4406,7 +4461,11 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
+struct pglist_data __refdata contig_page_data = {
+#ifndef CONFIG_NO_BOOTMEM
+        .bdata = &bootmem_node_data[0]
+#endif
+ };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d9714bdcb4a3..392b9bb5bc01 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -40,9 +40,11 @@ static void * __init_refok __earlyonly_bootmem_alloc(int node,
                                 unsigned long align,
                                 unsigned long goal)
 {
-        return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
+        return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
 }
 
+static void *vmemmap_buf;
+static void *vmemmap_buf_end;
 
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
@@ -64,6 +66,24 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
                         __pa(MAX_DMA_ADDRESS));
 }
 
+/* need to make sure size is all the same during early stage */
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
+{
+        void *ptr;
+
+        if (!vmemmap_buf)
+                return vmemmap_alloc_block(size, node);
+
+        /* take the from buf */
+        ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
+        if (ptr + size > vmemmap_buf_end)
+                return vmemmap_alloc_block(size, node);
+
+        vmemmap_buf = ptr + size;
+
+        return ptr;
+}
+
 void __meminit vmemmap_verify(pte_t *pte, int node,
                                 unsigned long start, unsigned long end)
 {
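
vmemmap_alloc_block_buf() is a bump allocator over a preallocated per-node buffer: align the cursor up to the request size, hand out that slice, advance the cursor, and fall back to vmemmap_alloc_block() once the buffer runs dry. The alignment trick requires every request to be the same power-of-two size, which is what the "size is all the same" comment warns about. The same pattern in miniature, as an illustrative userspace sketch with a malloc'd arena standing in for boot memory:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static char *buf, *buf_end;     /* [buf, buf_end) is the arena */

/* carve one size-aligned block; size must be a power of two */
static void *alloc_block_buf(size_t size)
{
        char *ptr = (char *)(((uintptr_t)buf + size - 1) &
                             ~(uintptr_t)(size - 1));

        if (ptr + size > buf_end)
                return NULL;    /* caller falls back to the slow path */
        buf = ptr + size;       /* bump the cursor */
        return ptr;
}

int main(void)
{
        char *arena = malloc(1 << 16);

        buf = arena;
        buf_end = arena + (1 << 16);
        printf("%p %p\n", alloc_block_buf(4096), alloc_block_buf(4096));
        free(arena);
        return 0;
}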
@@ -80,7 +100,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
         pte_t *pte = pte_offset_kernel(pmd, addr);
         if (pte_none(*pte)) {
                 pte_t entry;
-                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+                void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
                 if (!p)
                         return NULL;
                 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
@@ -163,3 +183,55 @@ struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
 
         return map;
 }
+
+void __init sparse_mem_maps_populate_node(struct page **map_map,
+                                          unsigned long pnum_begin,
+                                          unsigned long pnum_end,
+                                          unsigned long map_count, int nodeid)
+{
+        unsigned long pnum;
+        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
+        void *vmemmap_buf_start;
+
+        size = ALIGN(size, PMD_SIZE);
+        vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
+                        PMD_SIZE, __pa(MAX_DMA_ADDRESS));
+
+        if (vmemmap_buf_start) {
+                vmemmap_buf = vmemmap_buf_start;
+                vmemmap_buf_end = vmemmap_buf_start + size * map_count;
+        }
+
+        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                struct mem_section *ms;
+
+                if (!present_section_nr(pnum))
+                        continue;
+
+                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+                if (map_map[pnum])
+                        continue;
+                ms = __nr_to_section(pnum);
+                printk(KERN_ERR "%s: sparsemem memory map backing failed "
+                        "some memory will not be available.\n", __func__);
+                ms->section_mem_map = 0;
+        }
+
+        if (vmemmap_buf_start) {
+                /* need to free left buf */
+#ifdef CONFIG_NO_BOOTMEM
+                free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end));
+                if (vmemmap_buf_start < vmemmap_buf) {
+                        char name[15];
+
+                        snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
+                        reserve_early_without_check(__pa(vmemmap_buf_start),
+                                                    __pa(vmemmap_buf), name);
+                }
+#else
+                free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
+#endif
+                vmemmap_buf = NULL;
+                vmemmap_buf_end = NULL;
+        }
+}
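
sparse_mem_maps_populate_node() sizes one PMD-aligned buffer for all of a node's present sections, lets sparse_mem_map_populate() carve it up through the bump allocator above, and then gives back the leftover: under NO_BOOTMEM the whole buffer is freed early and only the consumed prefix is re-reserved, while the bootmem path simply frees the unused tail. The leftover arithmetic in a trivial sketch (free_early()/free_bootmem() are named only in the comments):

#include <stdio.h>

/* after populating, [start, cur) was consumed and [cur, end) is spare */
static void report_leftover(unsigned long start, unsigned long cur,
                            unsigned long end)
{
        /* NO_BOOTMEM: free_early(start, end), then re-reserve [start, cur) */
        printf("used prefix: [%#lx, %#lx)\n", start, cur);
        /* bootmem: free_bootmem(cur, end - cur) */
        printf("spare tail:  [%#lx, %#lx)\n", cur, end);
}

int main(void)
{
        /* 8 MB buffer, 6 MB of mem_map actually placed */
        report_leftover(0x1000000, 0x1000000 + (6UL << 20),
                        0x1000000 + (8UL << 20));
        return 0;
}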
diff --git a/mm/sparse.c b/mm/sparse.c
index 6ce4aab69e99..22896d589133 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -271,7 +271,8 @@ static unsigned long *__kmalloc_section_usemap(void)
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static unsigned long * __init
-sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+                                         unsigned long count)
 {
         unsigned long section_nr;
 
@@ -286,7 +287,7 @@ sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
          * this problem.
          */
         section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
-        return alloc_bootmem_section(usemap_size(), section_nr);
+        return alloc_bootmem_section(usemap_size() * count, section_nr);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -329,7 +330,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 }
 #else
 static unsigned long * __init
-sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+                                         unsigned long count)
 {
         return NULL;
 }
@@ -339,27 +341,40 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
-static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
+static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
+                                                   unsigned long pnum_begin,
+                                                   unsigned long pnum_end,
+                                                   unsigned long usemap_count, int nodeid)
 {
-        unsigned long *usemap;
-        struct mem_section *ms = __nr_to_section(pnum);
-        int nid = sparse_early_nid(ms);
-
-        usemap = sparse_early_usemap_alloc_pgdat_section(NODE_DATA(nid));
-        if (usemap)
-                return usemap;
+        void *usemap;
+        unsigned long pnum;
+        int size = usemap_size();
 
-        usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
+        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
+                                                          usemap_count);
         if (usemap) {
-                check_usemap_section_nr(nid, usemap);
-                return usemap;
+                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                        if (!present_section_nr(pnum))
+                                continue;
+                        usemap_map[pnum] = usemap;
+                        usemap += size;
+                }
+                return;
         }
 
-        /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
-        nid = 0;
+        usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
+        if (usemap) {
+                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                        if (!present_section_nr(pnum))
+                                continue;
+                        usemap_map[pnum] = usemap;
+                        usemap += size;
+                        check_usemap_section_nr(nodeid, usemap_map[pnum]);
+                }
+                return;
+        }
 
         printk(KERN_WARNING "%s: allocation failed\n", __func__);
-        return NULL;
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
@@ -375,8 +390,65 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
                 PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
         return map;
 }
+void __init sparse_mem_maps_populate_node(struct page **map_map,
+                                          unsigned long pnum_begin,
+                                          unsigned long pnum_end,
+                                          unsigned long map_count, int nodeid)
+{
+        void *map;
+        unsigned long pnum;
+        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
+
+        map = alloc_remap(nodeid, size * map_count);
+        if (map) {
+                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                        if (!present_section_nr(pnum))
+                                continue;
+                        map_map[pnum] = map;
+                        map += size;
+                }
+                return;
+        }
+
+        size = PAGE_ALIGN(size);
+        map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
+        if (map) {
+                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                        if (!present_section_nr(pnum))
+                                continue;
+                        map_map[pnum] = map;
+                        map += size;
+                }
+                return;
+        }
+
+        /* fallback */
+        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                struct mem_section *ms;
+
+                if (!present_section_nr(pnum))
+                        continue;
+                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+                if (map_map[pnum])
+                        continue;
+                ms = __nr_to_section(pnum);
+                printk(KERN_ERR "%s: sparsemem memory map backing failed "
+                        "some memory will not be available.\n", __func__);
+                ms->section_mem_map = 0;
+        }
+}
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+                                                    unsigned long pnum_begin,
+                                                    unsigned long pnum_end,
+                                                    unsigned long map_count, int nodeid)
+{
+        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
+                                      map_count, nodeid);
+}
+#else
 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
         struct page *map;
@@ -392,10 +464,12 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
         ms->section_mem_map = 0;
         return NULL;
 }
+#endif
 
 void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
 {
 }
+
 /*
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
@@ -407,6 +481,14 @@ void __init sparse_init(void)
         unsigned long *usemap;
         unsigned long **usemap_map;
         int size;
+        int nodeid_begin = 0;
+        unsigned long pnum_begin = 0;
+        unsigned long usemap_count;
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+        unsigned long map_count;
+        int size2;
+        struct page **map_map;
+#endif
 
         /*
          * map is using big page (aka 2M in x86 64 bit)
@@ -425,10 +507,81 @@ void __init sparse_init(void)
                 panic("can not allocate usemap_map\n");
 
         for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+                struct mem_section *ms;
+
                 if (!present_section_nr(pnum))
                         continue;
-                usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
+                ms = __nr_to_section(pnum);
+                nodeid_begin = sparse_early_nid(ms);
+                pnum_begin = pnum;
+                break;
         }
+        usemap_count = 1;
+        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+                struct mem_section *ms;
+                int nodeid;
+
+                if (!present_section_nr(pnum))
+                        continue;
+                ms = __nr_to_section(pnum);
+                nodeid = sparse_early_nid(ms);
+                if (nodeid == nodeid_begin) {
+                        usemap_count++;
+                        continue;
+                }
+                /* ok, we need to take cake of from pnum_begin to pnum - 1*/
+                sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
+                                                usemap_count, nodeid_begin);
+                /* new start, update count etc*/
+                nodeid_begin = nodeid;
+                pnum_begin = pnum;
+                usemap_count = 1;
+        }
+        /* ok, last chunk */
+        sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
+                                        usemap_count, nodeid_begin);
+
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
+        map_map = alloc_bootmem(size2);
+        if (!map_map)
+                panic("can not allocate map_map\n");
+
+        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+                struct mem_section *ms;
+
+                if (!present_section_nr(pnum))
+                        continue;
+                ms = __nr_to_section(pnum);
+                nodeid_begin = sparse_early_nid(ms);
+                pnum_begin = pnum;
+                break;
+        }
+        map_count = 1;
+        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+                struct mem_section *ms;
+                int nodeid;
+
+                if (!present_section_nr(pnum))
+                        continue;
+                ms = __nr_to_section(pnum);
+                nodeid = sparse_early_nid(ms);
+                if (nodeid == nodeid_begin) {
+                        map_count++;
+                        continue;
+                }
+                /* ok, we need to take cake of from pnum_begin to pnum - 1*/
+                sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
+                                                 map_count, nodeid_begin);
+                /* new start, update count etc*/
+                nodeid_begin = nodeid;
+                pnum_begin = pnum;
+                map_count = 1;
+        }
+        /* ok, last chunk */
+        sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
+                                         map_count, nodeid_begin);
+#endif
 
         for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                 if (!present_section_nr(pnum))
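
This grouping pass is the pattern both halves of sparse_init() now share: seed (nodeid_begin, pnum_begin) from the first present section, extend the run while the node id matches (holes stay inside a run), and flush one batched allocation per node plus a final flush for the last run. The loop in isolation, as a sketch with a stubbed flush and an invented section-to-node table:

#include <stdio.h>

#define NR_SECTIONS 10

/* -1 marks a hole (section not present) */
static int section_nid[NR_SECTIONS] = { 0, 0, 0, 1, 1, -1, 1, 2, 2, 2 };

static void flush(int begin, int end, int nid, unsigned long count)
{
        printf("node %d: sections [%d, %d), %lu present\n",
               nid, begin, end, count);
}

int main(void)
{
        int pnum, nid_begin = -1, pnum_begin = 0;
        unsigned long count = 0;

        for (pnum = 0; pnum < NR_SECTIONS; pnum++) {
                if (section_nid[pnum] < 0)
                        continue;               /* skip holes */
                if (nid_begin < 0) {
                        nid_begin = section_nid[pnum];  /* first run */
                        pnum_begin = pnum;
                        count = 1;
                } else if (section_nid[pnum] == nid_begin) {
                        count++;                /* same node: extend run */
                } else {
                        flush(pnum_begin, pnum, nid_begin, count);
                        nid_begin = section_nid[pnum];  /* new run */
                        pnum_begin = pnum;
                        count = 1;
                }
        }
        flush(pnum_begin, NR_SECTIONS, nid_begin, count);  /* last run */
        return 0;
}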
@@ -438,7 +591,11 @@ void __init sparse_init(void)
                 if (!usemap)
                         continue;
 
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+                map = map_map[pnum];
+#else
                 map = sparse_early_mem_map_alloc(pnum);
+#endif
                 if (!map)
                         continue;
 
@@ -448,6 +605,9 @@ void __init sparse_init(void)
 
         vmemmap_populate_print_last();
 
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+        free_bootmem(__pa(map_map), size2);
+#endif
         free_bootmem(__pa(usemap_map), size);
 }
 