author    Yinghai Lu <yinghai@kernel.org>  2010-08-25 16:39:17 -0400
committer H. Peter Anvin <hpa@zytor.com>   2010-08-27 14:13:47 -0400
commit    a9ce6bc15100023b411f8117e53a016d61889800 (patch)
tree      c9cb1468fde867fd9bfeb0cb33ffe994b0720aaa
parent    72d7c3b33c980843e756681fb4867dc1efd62a76 (diff)
x86, memblock: Replace e820_/_early string with memblock_
1. include linux/memblock.h directly, so that e820.h references can be reduced later.
2. this patch is done mainly by sed scripts.

-v2: use MEMBLOCK_ERROR instead of -1ULL or -1UL

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
-rw-r--r--  arch/x86/include/asm/efi.h       |  2
-rw-r--r--  arch/x86/kernel/acpi/sleep.c     |  9
-rw-r--r--  arch/x86/kernel/apic/numaq_32.c  |  3
-rw-r--r--  arch/x86/kernel/efi.c            |  5
-rw-r--r--  arch/x86/kernel/head32.c         |  4
-rw-r--r--  arch/x86/kernel/head64.c         |  4
-rw-r--r--  arch/x86/kernel/setup.c          | 29
-rw-r--r--  arch/x86/kernel/trampoline.c     | 10
-rw-r--r--  arch/x86/mm/init.c               | 10
-rw-r--r--  arch/x86/mm/init_32.c            | 14
-rw-r--r--  arch/x86/mm/init_64.c            | 11
-rw-r--r--  arch/x86/mm/k8topology_64.c      |  4
-rw-r--r--  arch/x86/mm/memtest.c            |  7
-rw-r--r--  arch/x86/mm/numa_32.c            | 25
-rw-r--r--  arch/x86/mm/numa_64.c            | 34
-rw-r--r--  arch/x86/mm/srat_32.c            |  3
-rw-r--r--  arch/x86/mm/srat_64.c            | 11
-rw-r--r--  arch/x86/xen/mmu.c               |  5
-rw-r--r--  arch/x86/xen/setup.c             |  3
-rw-r--r--  mm/bootmem.c                     |  4
20 files changed, 105 insertions, 92 deletions
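
The substitution is mechanical throughout. As a reading aid (not part of the patch), here is a minimal sketch of the old e820-based early-allocation idiom and its memblock replacement; start, end, size, align and "NAME" stand in for the caller-specific arguments:

	/* before: e820-based early allocator, -1ULL/-1L/-1UL as failure value */
	u64 mem = find_e820_area(start, end, size, align);
	if (mem == -1ULL)
		panic("cannot allocate NAME\n");
	reserve_early(mem, mem + size, "NAME");
	/* ... use the range ... */
	free_early(mem, mem + size);

	/* after: memblock equivalents, MEMBLOCK_ERROR as the failure value */
	phys_addr_t mem = memblock_find_in_range(start, end, size, align);
	if (mem == MEMBLOCK_ERROR)
		panic("cannot allocate NAME\n");
	memblock_x86_reserve_range(mem, mem + size, "NAME");
	/* ... use the range ... */
	memblock_x86_free_range(mem, mem + size);
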
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 8406ed7f9926..8e4a16508d4e 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -90,7 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
-extern void efi_reserve_early(void);
+extern void efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
 
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index fcc3c61fdecc..d829e75f9687 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -7,6 +7,7 @@
 
 #include <linux/acpi.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/dmi.h>
 #include <linux/cpumask.h>
 #include <asm/segment.h>
@@ -125,7 +126,7 @@ void acpi_restore_state_mem(void)
  */
 void __init acpi_reserve_wakeup_memory(void)
 {
-	unsigned long mem;
+	phys_addr_t mem;
 
 	if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) {
 		printk(KERN_ERR
@@ -133,15 +134,15 @@ void __init acpi_reserve_wakeup_memory(void)
 		return;
 	}
 
-	mem = find_e820_area(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE);
+	mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE);
 
-	if (mem == -1L) {
+	if (mem == MEMBLOCK_ERROR) {
 		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
 		return;
 	}
 	acpi_realmode = (unsigned long) phys_to_virt(mem);
 	acpi_wakeup_address = mem;
-	reserve_early(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
+	memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
 
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index 3e28401f161c..960f26ab5c9f 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -26,6 +26,7 @@
 #include <linux/nodemask.h>
 #include <linux/topology.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/kernel.h>
@@ -88,7 +89,7 @@ static inline void numaq_register_node(int node, struct sys_cfg_data *scd)
 	node_end_pfn[node] =
 		MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);
 
-	e820_register_active_regions(node, node_start_pfn[node],
+	memblock_x86_register_active_regions(node, node_start_pfn[node],
 				     node_end_pfn[node]);
 
 	memory_present(node, node_start_pfn[node], node_end_pfn[node]);
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index c2fa9b8b497e..0fe27d7c6258 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <linux/efi.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 #include <linux/time.h>
@@ -275,7 +276,7 @@ static void __init do_add_efi_memmap(void)
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 }
 
-void __init efi_reserve_early(void)
+void __init efi_memblock_x86_reserve_range(void)
 {
 	unsigned long pmap;
 
@@ -290,7 +291,7 @@ void __init efi_reserve_early(void)
 			boot_params.efi_info.efi_memdesc_size;
 	memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
 	memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
-	reserve_early(pmap, pmap + memmap.nr_map * memmap.desc_size,
+	memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size,
 		      "EFI memmap");
 }
 
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index da60aa8a850f..74e4cf65043e 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -42,7 +42,7 @@ void __init i386_start_kernel(void)
 	memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
 #endif
 
-	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+	memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
@@ -51,7 +51,7 @@ void __init i386_start_kernel(void)
 		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 		u64 ramdisk_size = boot_params.hdr.ramdisk_size;
 		u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
+		memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 8ee930fdeeb9..97adf9828b95 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -101,7 +101,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
 
 	memblock_init();
 
-	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+	memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
@@ -110,7 +110,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
 		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
 		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
 		unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
+		memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bbe0aaf77494..a4f01733e879 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -302,7 +302,7 @@ static inline void init_gbpages(void)
 static void __init reserve_brk(void)
 {
 	if (_brk_end > _brk_start)
-		reserve_early(__pa(_brk_start), __pa(_brk_end), "BRK");
+		memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK");
 
 	/* Mark brk area as locked down and no longer taking any
 	   new allocations */
@@ -324,17 +324,16 @@ static void __init relocate_initrd(void)
 	char *p, *q;
 
 	/* We need to move the initrd down into lowmem */
-	ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
+	ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size,
 					 PAGE_SIZE);
 
-	if (ramdisk_here == -1ULL)
+	if (ramdisk_here == MEMBLOCK_ERROR)
 		panic("Cannot find place for new RAMDISK of size %lld\n",
 			 ramdisk_size);
 
 	/* Note: this includes all the lowmem currently occupied by
 	   the initrd, we rely on that fact to keep the data intact. */
-	reserve_early(ramdisk_here, ramdisk_here + area_size,
-			 "NEW RAMDISK");
+	memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK");
 	initrd_start = ramdisk_here + PAGE_OFFSET;
 	initrd_end = initrd_start + ramdisk_size;
 	printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
@@ -390,7 +389,7 @@ static void __init reserve_initrd(void)
 		initrd_start = 0;
 
 	if (ramdisk_size >= (end_of_lowmem>>1)) {
-		free_early(ramdisk_image, ramdisk_end);
+		memblock_x86_free_range(ramdisk_image, ramdisk_end);
 		printk(KERN_ERR "initrd too large to handle, "
 		       "disabling initrd\n");
 		return;
@@ -413,7 +412,7 @@ static void __init reserve_initrd(void)
 
 	relocate_initrd();
 
-	free_early(ramdisk_image, ramdisk_end);
+	memblock_x86_free_range(ramdisk_image, ramdisk_end);
 }
 #else
 static void __init reserve_initrd(void)
@@ -469,7 +468,7 @@ static void __init e820_reserve_setup_data(void)
 	e820_print_map("reserve setup_data");
 }
 
-static void __init reserve_early_setup_data(void)
+static void __init memblock_x86_reserve_range_setup_data(void)
 {
 	struct setup_data *data;
 	u64 pa_data;
@@ -481,7 +480,7 @@ static void __init reserve_early_setup_data(void)
 	while (pa_data) {
 		data = early_memremap(pa_data, sizeof(*data));
 		sprintf(buf, "setup data %x", data->type);
-		reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
+		memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf);
 		pa_data = data->next;
 		early_iounmap(data, sizeof(*data));
 	}
@@ -519,23 +518,23 @@ static void __init reserve_crashkernel(void)
 	if (crash_base <= 0) {
 		const unsigned long long alignment = 16<<20;	/* 16M */
 
-		crash_base = find_e820_area(alignment, ULONG_MAX, crash_size,
+		crash_base = memblock_find_in_range(alignment, ULONG_MAX, crash_size,
 				 alignment);
-		if (crash_base == -1ULL) {
+		if (crash_base == MEMBLOCK_ERROR) {
 			pr_info("crashkernel reservation failed - No suitable area found.\n");
 			return;
 		}
 	} else {
 		unsigned long long start;
 
-		start = find_e820_area(crash_base, ULONG_MAX, crash_size,
+		start = memblock_find_in_range(crash_base, ULONG_MAX, crash_size,
 				 1<<20);
 		if (start != crash_base) {
 			pr_info("crashkernel reservation failed - memory is in use.\n");
 			return;
 		}
 	}
-	reserve_early(crash_base, crash_base + crash_size, "CRASH KERNEL");
+	memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL");
 
 	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
 			"for crashkernel (System RAM: %ldMB)\n",
@@ -786,7 +785,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 	    4)) {
 		efi_enabled = 1;
-		efi_reserve_early();
+		efi_memblock_x86_reserve_range();
 	}
 #endif
 
@@ -846,7 +845,7 @@ void __init setup_arch(char **cmdline_p)
 	vmi_activate();
 
 	/* after early param, so could get panic from serial */
-	reserve_early_setup_data();
+	memblock_x86_reserve_range_setup_data();
 
 	if (acpi_mps_check()) {
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index c652ef62742d..7c2102c2aadf 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -1,7 +1,7 @@
 #include <linux/io.h>
+#include <linux/memblock.h>
 
 #include <asm/trampoline.h>
-#include <asm/e820.h>
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
 #define __trampinit
@@ -16,15 +16,15 @@ unsigned char *__trampinitdata trampoline_base;
 
 void __init reserve_trampoline_memory(void)
 {
-	unsigned long mem;
+	phys_addr_t mem;
 
 	/* Has to be in very low memory so we can execute real-mode AP code. */
-	mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE);
-	if (mem == -1L)
+	mem = memblock_find_in_range(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE);
+	if (mem == MEMBLOCK_ERROR)
 		panic("Cannot allocate trampoline\n");
 
 	trampoline_base = __va(mem);
-	reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
+	memblock_x86_reserve_range(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
 }
 
 /*
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index b278535b14aa..c0e28a13de7d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -2,6 +2,7 @@
 #include <linux/initrd.h>
 #include <linux/ioport.h>
 #include <linux/swap.h>
+#include <linux/memblock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
@@ -33,6 +34,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 					  int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start;
+	phys_addr_t base;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
 	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
@@ -75,12 +77,12 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #else
 	start = 0x8000;
 #endif
-	e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
+	base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT,
 					tables, PAGE_SIZE);
-	if (e820_table_start == -1UL)
+	if (base == MEMBLOCK_ERROR)
 		panic("Cannot find space for the kernel page tables");
 
-	e820_table_start >>= PAGE_SHIFT;
+	e820_table_start = base >> PAGE_SHIFT;
 	e820_table_end = e820_table_start;
 	e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);
 
@@ -299,7 +301,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	__flush_tlb_all();
 
 	if (!after_bootmem && e820_table_end > e820_table_start)
-		reserve_early(e820_table_start << PAGE_SHIFT,
+		memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT,
 				e820_table_end << PAGE_SHIFT, "PGTABLE");
 
 	if (!after_bootmem)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 90e054589aae..63b09bae2509 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -25,6 +25,7 @@
 #include <linux/pfn.h>
 #include <linux/poison.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/proc_fs.h>
 #include <linux/memory_hotplug.h>
 #include <linux/initrd.h>
@@ -712,14 +713,14 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
 	highstart_pfn = highend_pfn = max_pfn;
 	if (max_pfn > max_low_pfn)
 		highstart_pfn = max_low_pfn;
-	e820_register_active_regions(0, 0, highend_pfn);
+	memblock_x86_register_active_regions(0, 0, highend_pfn);
 	sparse_memory_present_with_active_regions(0);
 	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
 		pages_to_mb(highend_pfn - highstart_pfn));
 	num_physpages = highend_pfn;
 	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
 #else
-	e820_register_active_regions(0, 0, max_low_pfn);
+	memblock_x86_register_active_regions(0, 0, max_low_pfn);
 	sparse_memory_present_with_active_regions(0);
 	num_physpages = max_low_pfn;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
@@ -776,16 +777,16 @@ void __init setup_bootmem_allocator(void)
 {
 #ifndef CONFIG_NO_BOOTMEM
 	int nodeid;
-	unsigned long bootmap_size, bootmap;
+	phys_addr_t bootmap_size, bootmap;
 	/*
 	 * Initialize the boot-time allocator (with low memory only):
 	 */
 	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
-	bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
+	bootmap = memblock_find_in_range(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
 				 PAGE_SIZE);
-	if (bootmap == -1L)
+	if (bootmap == MEMBLOCK_ERROR)
 		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
-	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
+	memblock_x86_reserve_range(bootmap, bootmap + bootmap_size, "BOOTMAP");
 #endif
 
 	printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
@@ -1069,3 +1070,4 @@ void mark_rodata_ro(void)
 #endif
 }
 #endif
+
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 634fa0884a41..592b2368062d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -21,6 +21,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/proc_fs.h>
 #include <linux/pci.h>
 #include <linux/pfn.h>
@@ -577,18 +578,18 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
 	unsigned long bootmap_size, bootmap;
 
 	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
+	bootmap = memblock_find_in_range(0, end_pfn<<PAGE_SHIFT, bootmap_size,
 				 PAGE_SIZE);
-	if (bootmap == -1L)
+	if (bootmap == MEMBLOCK_ERROR)
 		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
-	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
+	memblock_x86_reserve_range(bootmap, bootmap + bootmap_size, "BOOTMAP");
 	/* don't touch min_low_pfn */
 	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
 					 0, end_pfn);
-	e820_register_active_regions(0, start_pfn, end_pfn);
+	memblock_x86_register_active_regions(0, start_pfn, end_pfn);
 	free_bootmem_with_active_regions(0, end_pfn);
 #else
-	e820_register_active_regions(0, start_pfn, end_pfn);
+	memblock_x86_register_active_regions(0, start_pfn, end_pfn);
 #endif
 }
 #endif
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index 970ed579d4e4..966de9372e8c 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -11,6 +11,8 @@
 #include <linux/string.h>
 #include <linux/module.h>
 #include <linux/nodemask.h>
+#include <linux/memblock.h>
+
 #include <asm/io.h>
 #include <linux/pci_ids.h>
 #include <linux/acpi.h>
@@ -222,7 +224,7 @@ int __init k8_scan_nodes(void)
 	for_each_node_mask(i, node_possible_map) {
 		int j;
 
-		e820_register_active_regions(i,
+		memblock_x86_register_active_regions(i,
 				nodes[i].start >> PAGE_SHIFT,
 				nodes[i].end >> PAGE_SHIFT);
 		for (j = apicid_base; j < cores + apicid_base; j++)
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 18d244f70205..92faf3a1c53e 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -6,8 +6,7 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/pfn.h>
-
-#include <asm/e820.h>
+#include <linux/memblock.h>
 
 static u64 patterns[] __initdata = {
 	0,
@@ -35,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
 	       (unsigned long long) pattern,
 	       (unsigned long long) start_bad,
 	       (unsigned long long) end_bad);
-	reserve_early(start_bad, end_bad, "BAD RAM");
+	memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM");
 }
 
 static void __init memtest(u64 pattern, u64 start_phys, u64 size)
@@ -74,7 +73,7 @@ static void __init do_one_pass(u64 pattern, u64 start, u64 end)
 	u64 size = 0;
 
 	while (start < end) {
-		start = find_e820_area_size(start, &size, 1);
+		start = memblock_x86_find_in_range_size(start, &size, 1);
 
 		/* done ? */
 		if (start >= end)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 809baaaf48b1..ddf9730b2061 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -24,6 +24,7 @@
 
 #include <linux/mm.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/mmzone.h>
 #include <linux/highmem.h>
 #include <linux/initrd.h>
@@ -120,7 +121,7 @@ int __init get_memcfg_numa_flat(void)
 
 	node_start_pfn[0] = 0;
 	node_end_pfn[0] = max_pfn;
-	e820_register_active_regions(0, 0, max_pfn);
+	memblock_x86_register_active_regions(0, 0, max_pfn);
 	memory_present(0, 0, max_pfn);
 	node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);
 
@@ -161,14 +162,14 @@ static void __init allocate_pgdat(int nid)
 		NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
 	else {
 		unsigned long pgdat_phys;
-		pgdat_phys = find_e820_area(min_low_pfn<<PAGE_SHIFT,
+		pgdat_phys = memblock_find_in_range(min_low_pfn<<PAGE_SHIFT,
 				 max_pfn_mapped<<PAGE_SHIFT,
 				 sizeof(pg_data_t),
 				 PAGE_SIZE);
 		NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
 		memset(buf, 0, sizeof(buf));
 		sprintf(buf, "NODE_DATA %d", nid);
-		reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
+		memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
 	}
 	printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
 		nid, (unsigned long)NODE_DATA(nid));
@@ -291,15 +292,15 @@ static __init unsigned long calculate_numa_remap_pages(void)
 			PTRS_PER_PTE);
 	node_kva_target <<= PAGE_SHIFT;
 	do {
-		node_kva_final = find_e820_area(node_kva_target,
+		node_kva_final = memblock_find_in_range(node_kva_target,
 					((u64)node_end_pfn[nid])<<PAGE_SHIFT,
 					((u64)size)<<PAGE_SHIFT,
 					LARGE_PAGE_BYTES);
 		node_kva_target -= LARGE_PAGE_BYTES;
-	} while (node_kva_final == -1ULL &&
+	} while (node_kva_final == MEMBLOCK_ERROR &&
 		 (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
 
-	if (node_kva_final == -1ULL)
+	if (node_kva_final == MEMBLOCK_ERROR)
 		panic("Can not get kva ram\n");
 
 	node_remap_size[nid] = size;
@@ -318,9 +319,9 @@ static __init unsigned long calculate_numa_remap_pages(void)
 	 * but we could have some hole in high memory, and it will only
 	 * check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide
 	 * to use it as free.
-	 * So reserve_early here, hope we don't run out of that array
+	 * So memblock_x86_reserve_range here, hope we don't run out of that array
 	 */
-	reserve_early(node_kva_final,
+	memblock_x86_reserve_range(node_kva_final,
 		      node_kva_final+(((u64)size)<<PAGE_SHIFT),
 		      "KVA RAM");
 
@@ -367,14 +368,14 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
 
 	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
 	do {
-		kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
+		kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
 					max_low_pfn<<PAGE_SHIFT,
 					kva_pages<<PAGE_SHIFT,
 					PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
 		kva_target_pfn -= PTRS_PER_PTE;
-	} while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn);
+	} while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);
 
-	if (kva_start_pfn == -1UL)
+	if (kva_start_pfn == MEMBLOCK_ERROR)
 		panic("Can not get kva space\n");
 
 	printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n",
@@ -382,7 +383,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
 	printk(KERN_INFO "max_pfn = %lx\n", max_pfn);
 
 	/* avoid clash with initrd */
-	reserve_early(kva_start_pfn<<PAGE_SHIFT,
+	memblock_x86_reserve_range(kva_start_pfn<<PAGE_SHIFT,
 		      (kva_start_pfn + kva_pages)<<PAGE_SHIFT,
 		     "KVA PG");
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 3d54f9f95d46..984b1ff7db44 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -87,16 +87,16 @@ static int __init allocate_cachealigned_memnodemap(void)
 
 	addr = 0x8000;
 	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
-	nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
+	nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT,
 				      nodemap_size, L1_CACHE_BYTES);
-	if (nodemap_addr == -1UL) {
+	if (nodemap_addr == MEMBLOCK_ERROR) {
 		printk(KERN_ERR
 		       "NUMA: Unable to allocate Memory to Node hash map\n");
 		nodemap_addr = nodemap_size = 0;
 		return -1;
 	}
 	memnodemap = phys_to_virt(nodemap_addr);
-	reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
+	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
 
 	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
 	       nodemap_addr, nodemap_addr + nodemap_size);
@@ -227,7 +227,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	if (node_data[nodeid] == NULL)
 		return;
 	nodedata_phys = __pa(node_data[nodeid]);
-	reserve_early(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
+	memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
 	printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
 		nodedata_phys + pgdat_size - 1);
 	nid = phys_to_nid(nodedata_phys);
@@ -246,7 +246,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	 * Find a place for the bootmem map
 	 * nodedata_phys could be on other nodes by alloc_bootmem,
 	 * so need to sure bootmap_start not to be small, otherwise
-	 * early_node_mem will get that with find_e820_area instead
+	 * early_node_mem will get that with memblock_find_in_range instead
 	 * of alloc_bootmem, that could clash with reserved range
 	 */
 	bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
@@ -258,12 +258,12 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 	bootmap = early_node_mem(nodeid, bootmap_start, end,
 				 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
 	if (bootmap == NULL) {
-		free_early(nodedata_phys, nodedata_phys + pgdat_size);
+		memblock_x86_free_range(nodedata_phys, nodedata_phys + pgdat_size);
 		node_data[nodeid] = NULL;
 		return;
 	}
 	bootmap_start = __pa(bootmap);
-	reserve_early(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
+	memblock_x86_reserve_range(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
 			"BOOTMAP");
 
 	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
@@ -417,7 +417,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
 		nr_nodes = MAX_NUMNODES;
 	}
 
-	size = (max_addr - addr - e820_hole_size(addr, max_addr)) / nr_nodes;
+	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
 	/*
 	 * Calculate the number of big nodes that can be allocated as a result
 	 * of consolidating the remainder.
@@ -453,7 +453,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
 		 * non-reserved memory is less than the per-node size.
 		 */
 		while (end - physnodes[i].start -
-			e820_hole_size(physnodes[i].start, end) < size) {
+			memblock_x86_hole_size(physnodes[i].start, end) < size) {
 			end += FAKE_NODE_MIN_SIZE;
 			if (end > physnodes[i].end) {
 				end = physnodes[i].end;
@@ -467,7 +467,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
 		 * this one must extend to the boundary.
 		 */
 		if (end < dma32_end && dma32_end - end -
-		    e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+		    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 			end = dma32_end;
 
 		/*
@@ -476,7 +476,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
 		 * physical node.
 		 */
 		if (physnodes[i].end - end -
-		    e820_hole_size(end, physnodes[i].end) < size)
+		    memblock_x86_hole_size(end, physnodes[i].end) < size)
 			end = physnodes[i].end;
 
 		/*
@@ -504,7 +504,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
 {
 	u64 end = start + size;
 
-	while (end - start - e820_hole_size(start, end) < size) {
+	while (end - start - memblock_x86_hole_size(start, end) < size) {
 		end += FAKE_NODE_MIN_SIZE;
 		if (end > max_addr) {
 			end = max_addr;
@@ -533,7 +533,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
 	 * creates a uniform distribution of node sizes across the entire
 	 * machine (but not necessarily over physical nodes).
 	 */
-	min_size = (max_addr - addr - e820_hole_size(addr, max_addr)) /
+	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
 		MAX_NUMNODES;
 	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
 	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
@@ -566,7 +566,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
 		 * this one must extend to the boundary.
 		 */
 		if (end < dma32_end && dma32_end - end -
-		    e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+		    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 			end = dma32_end;
 
 		/*
@@ -575,7 +575,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
 		 * physical node.
 		 */
 		if (physnodes[i].end - end -
-		    e820_hole_size(end, physnodes[i].end) < size)
+		    memblock_x86_hole_size(end, physnodes[i].end) < size)
 			end = physnodes[i].end;
 
 		/*
@@ -639,7 +639,7 @@ static int __init numa_emulation(unsigned long start_pfn,
 	 */
 	remove_all_active_ranges();
 	for_each_node_mask(i, node_possible_map) {
-		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
+		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
 						nodes[i].end >> PAGE_SHIFT);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
@@ -692,7 +692,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
 	node_set(0, node_possible_map);
 	for (i = 0; i < nr_cpu_ids; i++)
 		numa_set_node(i, 0);
-	e820_register_active_regions(0, start_pfn, last_pfn);
+	memblock_x86_register_active_regions(0, start_pfn, last_pfn);
 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
 }
 
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 9324f13492d5..a17dffd136c1 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -25,6 +25,7 @@
  */
 #include <linux/mm.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/mmzone.h>
 #include <linux/acpi.h>
 #include <linux/nodemask.h>
@@ -264,7 +265,7 @@ int __init get_memcfg_from_srat(void)
 		if (node_read_chunk(chunk->nid, chunk))
 			continue;
 
-		e820_register_active_regions(chunk->nid, chunk->start_pfn,
+		memblock_x86_register_active_regions(chunk->nid, chunk->start_pfn,
 			 min(chunk->end_pfn, max_pfn));
 	}
 	/* for out of order entries in SRAT */
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index f9897f7a9ef1..7f44eb62a5e9 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/topology.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <asm/proto.h>
 #include <asm/numa.h>
@@ -98,15 +99,15 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 	unsigned long phys;
 
 	length = slit->header.length;
-	phys = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, length,
+	phys = memblock_find_in_range(0, max_pfn_mapped<<PAGE_SHIFT, length,
 		 PAGE_SIZE);
 
-	if (phys == -1L)
+	if (phys == MEMBLOCK_ERROR)
 		panic(" Can not save slit!\n");
 
 	acpi_slit = __va(phys);
 	memcpy(acpi_slit, slit, length);
-	reserve_early(phys, phys + length, "ACPI SLIT");
+	memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT");
 }
 
 /* Callback for Proximity Domain -> x2APIC mapping */
@@ -324,7 +325,7 @@ static int __init nodes_cover_memory(const struct bootnode *nodes)
 		pxmram = 0;
 	}
 
-	e820ram = max_pfn - (e820_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT);
+	e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT);
 	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
 	if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) {
 		printk(KERN_ERR
@@ -421,7 +422,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 	}
 
 	for_each_node_mask(i, nodes_parsed)
-		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
+		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
 						nodes[i].end >> PAGE_SHIFT);
 	/* for out of order entries in SRAT */
 	sort_node_map();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 914f04695ce5..b511f1986911 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -44,6 +44,7 @@
 #include <linux/bug.h>
 #include <linux/module.h>
 #include <linux/gfp.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -1735,7 +1736,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	__xen_write_cr3(true, __pa(pgd));
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 
-	reserve_early(__pa(xen_start_info->pt_base),
+	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
 		      __pa(xen_start_info->pt_base +
 			   xen_start_info->nr_pt_frames * PAGE_SIZE),
 		      "XEN PAGETABLES");
@@ -1773,7 +1774,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 
 	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
 
-	reserve_early(__pa(xen_start_info->pt_base),
+	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
 		      __pa(xen_start_info->pt_base +
 			   xen_start_info->nr_pt_frames * PAGE_SIZE),
 		      "XEN PAGETABLES");
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index ad0047f47cd4..2ac8f29f89cb 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -8,6 +8,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/pm.h>
+#include <linux/memblock.h>
 
 #include <asm/elf.h>
 #include <asm/vdso.h>
@@ -61,7 +62,7 @@ char * __init xen_memory_setup(void)
 	 *  - xen_start_info
 	 * See comment above "struct start_info" in <xen/interface/xen.h>
 	 */
-	reserve_early(__pa(xen_start_info->mfn_list),
+	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
 		      __pa(xen_start_info->pt_base),
 		      "XEN START INFO");
 
diff --git a/mm/bootmem.c b/mm/bootmem.c
index fda01a2c31af..13b0caa9793c 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -436,7 +436,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 {
 #ifdef CONFIG_NO_BOOTMEM
 	kmemleak_free_part(__va(physaddr), size);
-	free_early(physaddr, physaddr + size);
+	memblock_x86_free_range(physaddr, physaddr + size);
 #else
 	unsigned long start, end;
 
@@ -462,7 +462,7 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
 {
 #ifdef CONFIG_NO_BOOTMEM
 	kmemleak_free_part(__va(addr), size);
-	free_early(addr, addr + size);
+	memblock_x86_free_range(addr, addr + size);
 #else
 	unsigned long start, end;
 