aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYinghai Lu <yinghai@kernel.org>2011-02-24 08:43:05 -0500
committerTejun Heo <tj@kernel.org>2011-02-24 08:43:05 -0500
commit0932587328d9bd5b500a640fbaff3290c8d4cabf (patch)
tree7c041a41db88f7bb6d98f2a69e21a33a311b469f
parent2bf50555b0920be7e29d3823f6bbd20ee5920489 (diff)
bootmem: Separate out CONFIG_NO_BOOTMEM code into nobootmem.c
mm/bootmem.c contained code paths for both bootmem and no bootmem configurations. They implement about the same set of APIs in different ways and as a result bootmem.c contains a massive amount of #ifdef CONFIG_NO_BOOTMEM. Separate out CONFIG_NO_BOOTMEM code into mm/nobootmem.c. As the common part is relatively small, duplicate them in nobootmem.c instead of creating a common file or ifdef'ing in bootmem.c. The following are duplicated. * {min|max}_low_pfn, max_pfn, saved_max_pfn * free_bootmem_late() * ___alloc_bootmem() * __alloc_bootmem_low() The following are applicable only to nobootmem and moved verbatim. * __free_pages_memory() * free_all_memory_core_early() The following are not applicable to nobootmem and omitted in nobootmem.c. * reserve_bootmem_node() * reserve_bootmem() The remaining functions have their bodies split according to CONFIG_NO_BOOTMEM. Makefile is updated so that only either bootmem.c or nobootmem.c is built according to CONFIG_NO_BOOTMEM. This patch doesn't introduce any behavior change. -tj: Rewrote commit description. Suggested-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Yinghai Lu <yinghai@kernel.org> Acked-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--mm/Makefile8
-rw-r--r--mm/bootmem.c173
-rw-r--r--mm/nobootmem.c405
3 files changed, 415 insertions, 171 deletions
diff --git a/mm/Makefile b/mm/Makefile
index 2b1b575ae712..42a8326c3e3d 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -7,7 +7,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
7 mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ 7 mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
8 vmalloc.o pagewalk.o pgtable-generic.o 8 vmalloc.o pagewalk.o pgtable-generic.o
9 9
10obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ 10obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
11 maccess.o page_alloc.o page-writeback.o \ 11 maccess.o page_alloc.o page-writeback.o \
12 readahead.o swap.o truncate.o vmscan.o shmem.o \ 12 readahead.o swap.o truncate.o vmscan.o shmem.o \
13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ 13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
@@ -15,6 +15,12 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
15 $(mmu-y) 15 $(mmu-y)
16obj-y += init-mm.o 16obj-y += init-mm.o
17 17
18ifdef CONFIG_NO_BOOTMEM
19 obj-y += nobootmem.o
20else
21 obj-y += bootmem.o
22endif
23
18obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o 24obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
19 25
20obj-$(CONFIG_BOUNCE) += bounce.o 26obj-$(CONFIG_BOUNCE) += bounce.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 13b0caa9793c..4403e2fbc13d 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -35,7 +35,6 @@ unsigned long max_pfn;
35unsigned long saved_max_pfn; 35unsigned long saved_max_pfn;
36#endif 36#endif
37 37
38#ifndef CONFIG_NO_BOOTMEM
39bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata; 38bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
40 39
41static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list); 40static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
@@ -146,7 +145,7 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
146 min_low_pfn = start; 145 min_low_pfn = start;
147 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); 146 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
148} 147}
149#endif 148
150/* 149/*
151 * free_bootmem_late - free bootmem pages directly to page allocator 150 * free_bootmem_late - free bootmem pages directly to page allocator
152 * @addr: starting address of the range 151 * @addr: starting address of the range
@@ -171,53 +170,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
171 } 170 }
172} 171}
173 172
174#ifdef CONFIG_NO_BOOTMEM
175static void __init __free_pages_memory(unsigned long start, unsigned long end)
176{
177 int i;
178 unsigned long start_aligned, end_aligned;
179 int order = ilog2(BITS_PER_LONG);
180
181 start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
182 end_aligned = end & ~(BITS_PER_LONG - 1);
183
184 if (end_aligned <= start_aligned) {
185 for (i = start; i < end; i++)
186 __free_pages_bootmem(pfn_to_page(i), 0);
187
188 return;
189 }
190
191 for (i = start; i < start_aligned; i++)
192 __free_pages_bootmem(pfn_to_page(i), 0);
193
194 for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
195 __free_pages_bootmem(pfn_to_page(i), order);
196
197 for (i = end_aligned; i < end; i++)
198 __free_pages_bootmem(pfn_to_page(i), 0);
199}
200
201unsigned long __init free_all_memory_core_early(int nodeid)
202{
203 int i;
204 u64 start, end;
205 unsigned long count = 0;
206 struct range *range = NULL;
207 int nr_range;
208
209 nr_range = get_free_all_memory_range(&range, nodeid);
210
211 for (i = 0; i < nr_range; i++) {
212 start = range[i].start;
213 end = range[i].end;
214 count += end - start;
215 __free_pages_memory(start, end);
216 }
217
218 return count;
219}
220#else
221static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) 173static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
222{ 174{
223 int aligned; 175 int aligned;
@@ -278,7 +230,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
278 230
279 return count; 231 return count;
280} 232}
281#endif
282 233
283/** 234/**
284 * free_all_bootmem_node - release a node's free pages to the buddy allocator 235 * free_all_bootmem_node - release a node's free pages to the buddy allocator
@@ -289,12 +240,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
289unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) 240unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
290{ 241{
291 register_page_bootmem_info_node(pgdat); 242 register_page_bootmem_info_node(pgdat);
292#ifdef CONFIG_NO_BOOTMEM
293 /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
294 return 0;
295#else
296 return free_all_bootmem_core(pgdat->bdata); 243 return free_all_bootmem_core(pgdat->bdata);
297#endif
298} 244}
299 245
300/** 246/**
@@ -304,16 +250,6 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
304 */ 250 */
305unsigned long __init free_all_bootmem(void) 251unsigned long __init free_all_bootmem(void)
306{ 252{
307#ifdef CONFIG_NO_BOOTMEM
308 /*
309 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
310 * because in some case like Node0 doesnt have RAM installed
311 * low ram will be on Node1
312 * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
313 * will be used instead of only Node0 related
314 */
315 return free_all_memory_core_early(MAX_NUMNODES);
316#else
317 unsigned long total_pages = 0; 253 unsigned long total_pages = 0;
318 bootmem_data_t *bdata; 254 bootmem_data_t *bdata;
319 255
@@ -321,10 +257,8 @@ unsigned long __init free_all_bootmem(void)
321 total_pages += free_all_bootmem_core(bdata); 257 total_pages += free_all_bootmem_core(bdata);
322 258
323 return total_pages; 259 return total_pages;
324#endif
325} 260}
326 261
327#ifndef CONFIG_NO_BOOTMEM
328static void __init __free(bootmem_data_t *bdata, 262static void __init __free(bootmem_data_t *bdata,
329 unsigned long sidx, unsigned long eidx) 263 unsigned long sidx, unsigned long eidx)
330{ 264{
@@ -419,7 +353,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
419 } 353 }
420 BUG(); 354 BUG();
421} 355}
422#endif
423 356
424/** 357/**
425 * free_bootmem_node - mark a page range as usable 358 * free_bootmem_node - mark a page range as usable
@@ -434,10 +367,6 @@ static int __init mark_bootmem(unsigned long start, unsigned long end,
434void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, 367void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
435 unsigned long size) 368 unsigned long size)
436{ 369{
437#ifdef CONFIG_NO_BOOTMEM
438 kmemleak_free_part(__va(physaddr), size);
439 memblock_x86_free_range(physaddr, physaddr + size);
440#else
441 unsigned long start, end; 370 unsigned long start, end;
442 371
443 kmemleak_free_part(__va(physaddr), size); 372 kmemleak_free_part(__va(physaddr), size);
@@ -446,7 +375,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
446 end = PFN_DOWN(physaddr + size); 375 end = PFN_DOWN(physaddr + size);
447 376
448 mark_bootmem_node(pgdat->bdata, start, end, 0, 0); 377 mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
449#endif
450} 378}
451 379
452/** 380/**
@@ -460,10 +388,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
460 */ 388 */
461void __init free_bootmem(unsigned long addr, unsigned long size) 389void __init free_bootmem(unsigned long addr, unsigned long size)
462{ 390{
463#ifdef CONFIG_NO_BOOTMEM
464 kmemleak_free_part(__va(addr), size);
465 memblock_x86_free_range(addr, addr + size);
466#else
467 unsigned long start, end; 391 unsigned long start, end;
468 392
469 kmemleak_free_part(__va(addr), size); 393 kmemleak_free_part(__va(addr), size);
@@ -472,7 +396,6 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
472 end = PFN_DOWN(addr + size); 396 end = PFN_DOWN(addr + size);
473 397
474 mark_bootmem(start, end, 0, 0); 398 mark_bootmem(start, end, 0, 0);
475#endif
476} 399}
477 400
478/** 401/**
@@ -489,17 +412,12 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
489int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, 412int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
490 unsigned long size, int flags) 413 unsigned long size, int flags)
491{ 414{
492#ifdef CONFIG_NO_BOOTMEM
493 panic("no bootmem");
494 return 0;
495#else
496 unsigned long start, end; 415 unsigned long start, end;
497 416
498 start = PFN_DOWN(physaddr); 417 start = PFN_DOWN(physaddr);
499 end = PFN_UP(physaddr + size); 418 end = PFN_UP(physaddr + size);
500 419
501 return mark_bootmem_node(pgdat->bdata, start, end, 1, flags); 420 return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
502#endif
503} 421}
504 422
505/** 423/**
@@ -515,20 +433,14 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
515int __init reserve_bootmem(unsigned long addr, unsigned long size, 433int __init reserve_bootmem(unsigned long addr, unsigned long size,
516 int flags) 434 int flags)
517{ 435{
518#ifdef CONFIG_NO_BOOTMEM
519 panic("no bootmem");
520 return 0;
521#else
522 unsigned long start, end; 436 unsigned long start, end;
523 437
524 start = PFN_DOWN(addr); 438 start = PFN_DOWN(addr);
525 end = PFN_UP(addr + size); 439 end = PFN_UP(addr + size);
526 440
527 return mark_bootmem(start, end, 1, flags); 441 return mark_bootmem(start, end, 1, flags);
528#endif
529} 442}
530 443
531#ifndef CONFIG_NO_BOOTMEM
532int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len, 444int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
533 int flags) 445 int flags)
534{ 446{
@@ -685,33 +597,12 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
685#endif 597#endif
686 return NULL; 598 return NULL;
687} 599}
688#endif
689 600
690static void * __init ___alloc_bootmem_nopanic(unsigned long size, 601static void * __init ___alloc_bootmem_nopanic(unsigned long size,
691 unsigned long align, 602 unsigned long align,
692 unsigned long goal, 603 unsigned long goal,
693 unsigned long limit) 604 unsigned long limit)
694{ 605{
695#ifdef CONFIG_NO_BOOTMEM
696 void *ptr;
697
698 if (WARN_ON_ONCE(slab_is_available()))
699 return kzalloc(size, GFP_NOWAIT);
700
701restart:
702
703 ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
704
705 if (ptr)
706 return ptr;
707
708 if (goal != 0) {
709 goal = 0;
710 goto restart;
711 }
712
713 return NULL;
714#else
715 bootmem_data_t *bdata; 606 bootmem_data_t *bdata;
716 void *region; 607 void *region;
717 608
@@ -737,7 +628,6 @@ restart:
737 } 628 }
738 629
739 return NULL; 630 return NULL;
740#endif
741} 631}
742 632
743/** 633/**
@@ -758,10 +648,6 @@ void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
758{ 648{
759 unsigned long limit = 0; 649 unsigned long limit = 0;
760 650
761#ifdef CONFIG_NO_BOOTMEM
762 limit = -1UL;
763#endif
764
765 return ___alloc_bootmem_nopanic(size, align, goal, limit); 651 return ___alloc_bootmem_nopanic(size, align, goal, limit);
766} 652}
767 653
@@ -798,14 +684,9 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
798{ 684{
799 unsigned long limit = 0; 685 unsigned long limit = 0;
800 686
801#ifdef CONFIG_NO_BOOTMEM
802 limit = -1UL;
803#endif
804
805 return ___alloc_bootmem(size, align, goal, limit); 687 return ___alloc_bootmem(size, align, goal, limit);
806} 688}
807 689
808#ifndef CONFIG_NO_BOOTMEM
809static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata, 690static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
810 unsigned long size, unsigned long align, 691 unsigned long size, unsigned long align,
811 unsigned long goal, unsigned long limit) 692 unsigned long goal, unsigned long limit)
@@ -822,7 +703,6 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
822 703
823 return ___alloc_bootmem(size, align, goal, limit); 704 return ___alloc_bootmem(size, align, goal, limit);
824} 705}
825#endif
826 706
827/** 707/**
828 * __alloc_bootmem_node - allocate boot memory from a specific node 708 * __alloc_bootmem_node - allocate boot memory from a specific node
@@ -842,24 +722,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
842void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, 722void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
843 unsigned long align, unsigned long goal) 723 unsigned long align, unsigned long goal)
844{ 724{
845 void *ptr;
846
847 if (WARN_ON_ONCE(slab_is_available())) 725 if (WARN_ON_ONCE(slab_is_available()))
848 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 726 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
849 727
850#ifdef CONFIG_NO_BOOTMEM 728 return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
851 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
852 goal, -1ULL);
853 if (ptr)
854 return ptr;
855
856 ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
857 goal, -1ULL);
858#else
859 ptr = ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
860#endif
861
862 return ptr;
863} 729}
864 730
865void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, 731void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -880,13 +746,8 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
880 unsigned long new_goal; 746 unsigned long new_goal;
881 747
882 new_goal = MAX_DMA32_PFN << PAGE_SHIFT; 748 new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
883#ifdef CONFIG_NO_BOOTMEM
884 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
885 new_goal, -1ULL);
886#else
887 ptr = alloc_bootmem_core(pgdat->bdata, size, align, 749 ptr = alloc_bootmem_core(pgdat->bdata, size, align,
888 new_goal, 0); 750 new_goal, 0);
889#endif
890 if (ptr) 751 if (ptr)
891 return ptr; 752 return ptr;
892 } 753 }
@@ -907,16 +768,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
907void * __init alloc_bootmem_section(unsigned long size, 768void * __init alloc_bootmem_section(unsigned long size,
908 unsigned long section_nr) 769 unsigned long section_nr)
909{ 770{
910#ifdef CONFIG_NO_BOOTMEM
911 unsigned long pfn, goal, limit;
912
913 pfn = section_nr_to_pfn(section_nr);
914 goal = pfn << PAGE_SHIFT;
915 limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
916
917 return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
918 SMP_CACHE_BYTES, goal, limit);
919#else
920 bootmem_data_t *bdata; 771 bootmem_data_t *bdata;
921 unsigned long pfn, goal, limit; 772 unsigned long pfn, goal, limit;
922 773
@@ -926,7 +777,6 @@ void * __init alloc_bootmem_section(unsigned long size,
926 bdata = &bootmem_node_data[early_pfn_to_nid(pfn)]; 777 bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
927 778
928 return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit); 779 return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
929#endif
930} 780}
931#endif 781#endif
932 782
@@ -938,16 +788,11 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
938 if (WARN_ON_ONCE(slab_is_available())) 788 if (WARN_ON_ONCE(slab_is_available()))
939 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 789 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
940 790
941#ifdef CONFIG_NO_BOOTMEM
942 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
943 goal, -1ULL);
944#else
945 ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0); 791 ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
946 if (ptr) 792 if (ptr)
947 return ptr; 793 return ptr;
948 794
949 ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0); 795 ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
950#endif
951 if (ptr) 796 if (ptr)
952 return ptr; 797 return ptr;
953 798
@@ -995,21 +840,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
995void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, 840void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
996 unsigned long align, unsigned long goal) 841 unsigned long align, unsigned long goal)
997{ 842{
998 void *ptr;
999
1000 if (WARN_ON_ONCE(slab_is_available())) 843 if (WARN_ON_ONCE(slab_is_available()))
1001 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 844 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
1002 845
1003#ifdef CONFIG_NO_BOOTMEM 846 return ___alloc_bootmem_node(pgdat->bdata, size, align,
1004 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
1005 goal, ARCH_LOW_ADDRESS_LIMIT); 847 goal, ARCH_LOW_ADDRESS_LIMIT);
1006 if (ptr)
1007 return ptr;
1008 ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
1009 goal, ARCH_LOW_ADDRESS_LIMIT);
1010#else
1011 ptr = ___alloc_bootmem_node(pgdat->bdata, size, align,
1012 goal, ARCH_LOW_ADDRESS_LIMIT);
1013#endif
1014 return ptr;
1015} 848}
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
new file mode 100644
index 000000000000..f220b8d0a97d
--- /dev/null
+++ b/mm/nobootmem.c
@@ -0,0 +1,405 @@
1/*
2 * bootmem - A boot-time physical memory allocator and configurator
3 *
4 * Copyright (C) 1999 Ingo Molnar
5 * 1999 Kanoj Sarcar, SGI
6 * 2008 Johannes Weiner
7 *
8 * Access to this subsystem has to be serialized externally (which is true
9 * for the boot process anyway).
10 */
11#include <linux/init.h>
12#include <linux/pfn.h>
13#include <linux/slab.h>
14#include <linux/bootmem.h>
15#include <linux/module.h>
16#include <linux/kmemleak.h>
17#include <linux/range.h>
18#include <linux/memblock.h>
19
20#include <asm/bug.h>
21#include <asm/io.h>
22#include <asm/processor.h>
23
24#include "internal.h"
25
26unsigned long max_low_pfn;
27unsigned long min_low_pfn;
28unsigned long max_pfn;
29
30#ifdef CONFIG_CRASH_DUMP
31/*
32 * If we have booted due to a crash, max_pfn will be a very low value. We need
33 * to know the amount of memory that the previous kernel used.
34 */
35unsigned long saved_max_pfn;
36#endif
37
38/*
39 * free_bootmem_late - free bootmem pages directly to page allocator
40 * @addr: starting address of the range
41 * @size: size of the range in bytes
42 *
43 * This is only useful when the bootmem allocator has already been torn
44 * down, but we are still initializing the system. Pages are given directly
45 * to the page allocator, no bootmem metadata is updated because it is gone.
46 */
47void __init free_bootmem_late(unsigned long addr, unsigned long size)
48{
49 unsigned long cursor, end;
50
51 kmemleak_free_part(__va(addr), size);
52
53 cursor = PFN_UP(addr);
54 end = PFN_DOWN(addr + size);
55
56 for (; cursor < end; cursor++) {
57 __free_pages_bootmem(pfn_to_page(cursor), 0);
58 totalram_pages++;
59 }
60}
61
62static void __init __free_pages_memory(unsigned long start, unsigned long end)
63{
64 int i;
65 unsigned long start_aligned, end_aligned;
66 int order = ilog2(BITS_PER_LONG);
67
68 start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
69 end_aligned = end & ~(BITS_PER_LONG - 1);
70
71 if (end_aligned <= start_aligned) {
72 for (i = start; i < end; i++)
73 __free_pages_bootmem(pfn_to_page(i), 0);
74
75 return;
76 }
77
78 for (i = start; i < start_aligned; i++)
79 __free_pages_bootmem(pfn_to_page(i), 0);
80
81 for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
82 __free_pages_bootmem(pfn_to_page(i), order);
83
84 for (i = end_aligned; i < end; i++)
85 __free_pages_bootmem(pfn_to_page(i), 0);
86}
87
88unsigned long __init free_all_memory_core_early(int nodeid)
89{
90 int i;
91 u64 start, end;
92 unsigned long count = 0;
93 struct range *range = NULL;
94 int nr_range;
95
96 nr_range = get_free_all_memory_range(&range, nodeid);
97
98 for (i = 0; i < nr_range; i++) {
99 start = range[i].start;
100 end = range[i].end;
101 count += end - start;
102 __free_pages_memory(start, end);
103 }
104
105 return count;
106}
107
108/**
109 * free_all_bootmem_node - release a node's free pages to the buddy allocator
110 * @pgdat: node to be released
111 *
112 * Returns the number of pages actually released.
113 */
114unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
115{
116 register_page_bootmem_info_node(pgdat);
117
118 /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
119 return 0;
120}
121
122/**
123 * free_all_bootmem - release free pages to the buddy allocator
124 *
125 * Returns the number of pages actually released.
126 */
127unsigned long __init free_all_bootmem(void)
128{
129 /*
130 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
131 * because in some case like Node0 doesnt have RAM installed
132 * low ram will be on Node1
133 * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
134 * will be used instead of only Node0 related
135 */
136 return free_all_memory_core_early(MAX_NUMNODES);
137}
138
139/**
140 * free_bootmem_node - mark a page range as usable
141 * @pgdat: node the range resides on
142 * @physaddr: starting address of the range
143 * @size: size of the range in bytes
144 *
145 * Partial pages will be considered reserved and left as they are.
146 *
147 * The range must reside completely on the specified node.
148 */
149void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
150 unsigned long size)
151{
152 kmemleak_free_part(__va(physaddr), size);
153 memblock_x86_free_range(physaddr, physaddr + size);
154}
155
156/**
157 * free_bootmem - mark a page range as usable
158 * @addr: starting address of the range
159 * @size: size of the range in bytes
160 *
161 * Partial pages will be considered reserved and left as they are.
162 *
163 * The range must be contiguous but may span node boundaries.
164 */
165void __init free_bootmem(unsigned long addr, unsigned long size)
166{
167 kmemleak_free_part(__va(addr), size);
168 memblock_x86_free_range(addr, addr + size);
169}
170
171static void * __init ___alloc_bootmem_nopanic(unsigned long size,
172 unsigned long align,
173 unsigned long goal,
174 unsigned long limit)
175{
176 void *ptr;
177
178 if (WARN_ON_ONCE(slab_is_available()))
179 return kzalloc(size, GFP_NOWAIT);
180
181restart:
182
183 ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
184
185 if (ptr)
186 return ptr;
187
188 if (goal != 0) {
189 goal = 0;
190 goto restart;
191 }
192
193 return NULL;
194}
195
196/**
197 * __alloc_bootmem_nopanic - allocate boot memory without panicking
198 * @size: size of the request in bytes
199 * @align: alignment of the region
200 * @goal: preferred starting address of the region
201 *
202 * The goal is dropped if it can not be satisfied and the allocation will
203 * fall back to memory below @goal.
204 *
205 * Allocation may happen on any node in the system.
206 *
207 * Returns NULL on failure.
208 */
209void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
210 unsigned long goal)
211{
212 unsigned long limit = -1UL;
213
214 return ___alloc_bootmem_nopanic(size, align, goal, limit);
215}
216
217static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
218 unsigned long goal, unsigned long limit)
219{
220 void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
221
222 if (mem)
223 return mem;
224 /*
225 * Whoops, we cannot satisfy the allocation request.
226 */
227 printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
228 panic("Out of memory");
229 return NULL;
230}
231
232/**
233 * __alloc_bootmem - allocate boot memory
234 * @size: size of the request in bytes
235 * @align: alignment of the region
236 * @goal: preferred starting address of the region
237 *
238 * The goal is dropped if it can not be satisfied and the allocation will
239 * fall back to memory below @goal.
240 *
241 * Allocation may happen on any node in the system.
242 *
243 * The function panics if the request can not be satisfied.
244 */
245void * __init __alloc_bootmem(unsigned long size, unsigned long align,
246 unsigned long goal)
247{
248 unsigned long limit = -1UL;
249
250 return ___alloc_bootmem(size, align, goal, limit);
251}
252
253/**
254 * __alloc_bootmem_node - allocate boot memory from a specific node
255 * @pgdat: node to allocate from
256 * @size: size of the request in bytes
257 * @align: alignment of the region
258 * @goal: preferred starting address of the region
259 *
260 * The goal is dropped if it can not be satisfied and the allocation will
261 * fall back to memory below @goal.
262 *
263 * Allocation may fall back to any node in the system if the specified node
264 * can not hold the requested memory.
265 *
266 * The function panics if the request can not be satisfied.
267 */
268void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
269 unsigned long align, unsigned long goal)
270{
271 void *ptr;
272
273 if (WARN_ON_ONCE(slab_is_available()))
274 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
275
276 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
277 goal, -1ULL);
278 if (ptr)
279 return ptr;
280
281 return __alloc_memory_core_early(MAX_NUMNODES, size, align,
282 goal, -1ULL);
283}
284
285void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
286 unsigned long align, unsigned long goal)
287{
288#ifdef MAX_DMA32_PFN
289 unsigned long end_pfn;
290
291 if (WARN_ON_ONCE(slab_is_available()))
292 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
293
294 /* update goal according ...MAX_DMA32_PFN */
295 end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
296
297 if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
298 (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
299 void *ptr;
300 unsigned long new_goal;
301
302 new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
303 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
304 new_goal, -1ULL);
305 if (ptr)
306 return ptr;
307 }
308#endif
309
310 return __alloc_bootmem_node(pgdat, size, align, goal);
311
312}
313
314#ifdef CONFIG_SPARSEMEM
315/**
316 * alloc_bootmem_section - allocate boot memory from a specific section
317 * @size: size of the request in bytes
318 * @section_nr: sparse map section to allocate from
319 *
320 * Return NULL on failure.
321 */
322void * __init alloc_bootmem_section(unsigned long size,
323 unsigned long section_nr)
324{
325 unsigned long pfn, goal, limit;
326
327 pfn = section_nr_to_pfn(section_nr);
328 goal = pfn << PAGE_SHIFT;
329 limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
330
331 return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
332 SMP_CACHE_BYTES, goal, limit);
333}
334#endif
335
336void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
337 unsigned long align, unsigned long goal)
338{
339 void *ptr;
340
341 if (WARN_ON_ONCE(slab_is_available()))
342 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
343
344 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
345 goal, -1ULL);
346 if (ptr)
347 return ptr;
348
349 return __alloc_bootmem_nopanic(size, align, goal);
350}
351
352#ifndef ARCH_LOW_ADDRESS_LIMIT
353#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
354#endif
355
356/**
357 * __alloc_bootmem_low - allocate low boot memory
358 * @size: size of the request in bytes
359 * @align: alignment of the region
360 * @goal: preferred starting address of the region
361 *
362 * The goal is dropped if it can not be satisfied and the allocation will
363 * fall back to memory below @goal.
364 *
365 * Allocation may happen on any node in the system.
366 *
367 * The function panics if the request can not be satisfied.
368 */
369void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
370 unsigned long goal)
371{
372 return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
373}
374
375/**
376 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
377 * @pgdat: node to allocate from
378 * @size: size of the request in bytes
379 * @align: alignment of the region
380 * @goal: preferred starting address of the region
381 *
382 * The goal is dropped if it can not be satisfied and the allocation will
383 * fall back to memory below @goal.
384 *
385 * Allocation may fall back to any node in the system if the specified node
386 * can not hold the requested memory.
387 *
388 * The function panics if the request can not be satisfied.
389 */
390void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
391 unsigned long align, unsigned long goal)
392{
393 void *ptr;
394
395 if (WARN_ON_ONCE(slab_is_available()))
396 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
397
398 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
399 goal, ARCH_LOW_ADDRESS_LIMIT);
400 if (ptr)
401 return ptr;
402
403 return __alloc_memory_core_early(MAX_NUMNODES, size, align,
404 goal, ARCH_LOW_ADDRESS_LIMIT);
405}