Diffstat (limited to 'arch')
56 files changed, 828 insertions, 666 deletions
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7185b00650fe..8504906b147f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -150,6 +150,7 @@ static void __init find_limits(struct meminfo *mi,
 static void __init arm_bootmem_init(struct meminfo *mi,
 	unsigned long start_pfn, unsigned long end_pfn)
 {
+	struct memblock_region *reg;
 	unsigned int boot_pages;
 	phys_addr_t bitmap;
 	pg_data_t *pgdat;
@@ -180,13 +181,13 @@ static void __init arm_bootmem_init(struct meminfo *mi,
 	/*
 	 * Reserve the memblock reserved regions in bootmem.
 	 */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
-		if (start >= start_pfn &&
-		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
+	for_each_memblock(reserved, reg) {
+		phys_addr_t start = memblock_region_base_pfn(reg);
+		phys_addr_t end = memblock_region_end_pfn(reg);
+		if (start >= start_pfn && end <= end_pfn)
 			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
-				memblock_size_bytes(&memblock.reserved, i),
+				(end - start) << PAGE_SHIFT,
 				BOOTMEM_DEFAULT);
 	}
 }
@@ -237,20 +238,7 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
 #ifndef CONFIG_SPARSEMEM
 int pfn_valid(unsigned long pfn)
 {
-	struct memblock_region *mem = &memblock.memory;
-	unsigned int left = 0, right = mem->cnt;
-
-	do {
-		unsigned int mid = (right + left) / 2;
-
-		if (pfn < memblock_start_pfn(mem, mid))
-			right = mid;
-		else if (pfn >= memblock_end_pfn(mem, mid))
-			left = mid + 1;
-		else
-			return 1;
-	} while (left < right);
-	return 0;
+	return memblock_is_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
 
@@ -260,10 +248,12 @@ static void arm_memory_present(void)
 #else
 static void arm_memory_present(void)
 {
-	int i;
-	for (i = 0; i < memblock.memory.cnt; i++)
-		memory_present(0, memblock_start_pfn(&memblock.memory, i),
-			       memblock_end_pfn(&memblock.memory, i));
+	struct memblock_region *reg;
+
+	for_each_memblock(memory, reg) {
+		memory_present(0, memblock_region_base_pfn(reg),
+			       memblock_region_end_pfn(reg));
+	}
 }
 #endif
 
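Every conversion in this series leans on the same handful of new helpers. As a reading aid, here is a sketch of how for_each_memblock() and the region-to-pfn accessors are presumably defined; the real definitions live in include/linux/memblock.h, outside the 'arch' diffstat shown here, so the exact field names (the region array name is taken from the wii.c hunk below) are assumptions:

/* Sketch only: assumed shape of the new memblock iteration API. */
#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.region;			\
	     region < (memblock.memblock_type.region +			\
		       memblock.memblock_type.cnt);			\
	     region++)

/* First page frame covered by a region. */
static inline unsigned long memblock_region_base_pfn(const struct memblock_region *reg)
{
	return reg->base >> PAGE_SHIFT;
}

/* One past the last page frame covered by a region. */
static inline unsigned long memblock_region_end_pfn(const struct memblock_region *reg)
{
	return (reg->base + reg->size) >> PAGE_SHIFT;
}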
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
index 0054b9501a53..441af2bdfd15 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -173,11 +173,7 @@ static int check_fbmem_region(int region_idx, struct omapfb_mem_region *rg,
 
 static int valid_sdram(unsigned long addr, unsigned long size)
 {
-	struct memblock_property res;
-
-	res.base = addr;
-	res.size = size;
-	return !memblock_find(&res) && res.base == addr && res.size == size;
+	return memblock_region_is_memory(addr, size);
 }
 
 static int reserve_sdram(unsigned long addr, unsigned long size)
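valid_sdram() collapses to a single predicate. Its assumed semantics, sketched with the iterator shown above (the real implementation would live in the memblock core, outside this diff, and may well use a binary search instead):

/* Sketch: true iff [addr, addr + size) sits inside one memory region. */
static int memblock_region_is_memory_sketch(phys_addr_t addr, phys_addr_t size)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (addr >= reg->base && addr + size <= reg->base + reg->size)
			return 1;
	return 0;
}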
diff --git a/arch/microblaze/include/asm/memblock.h b/arch/microblaze/include/asm/memblock.h
index f9c2fa331d2a..20a8e257c77f 100644
--- a/arch/microblaze/include/asm/memblock.h
+++ b/arch/microblaze/include/asm/memblock.h
@@ -9,9 +9,6 @@
 #ifndef _ASM_MICROBLAZE_MEMBLOCK_H
 #define _ASM_MICROBLAZE_MEMBLOCK_H
 
-/* MEMBLOCK limit is OFF */
-#define MEMBLOCK_REAL_LIMIT	0xFFFFFFFF
-
 #endif /* _ASM_MICROBLAZE_MEMBLOCK_H */
 
 
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 65eb00419d19..840026c5bc56 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -70,16 +70,16 @@ static void __init paging_init(void)
 
 void __init setup_memory(void)
 {
-	int i;
 	unsigned long map_size;
+	struct memblock_region *reg;
+
 #ifndef CONFIG_MMU
 	u32 kernel_align_start, kernel_align_size;
 
 	/* Find main memory where is the kernel */
-	for (i = 0; i < memblock.memory.cnt; i++) {
-		memory_start = (u32) memblock.memory.region[i].base;
-		memory_end = (u32) memblock.memory.region[i].base
-				+ (u32) memblock.memory.region[i].size;
+	for_each_memblock(memory, reg) {
+		memory_start = (u32)reg->base;
+		memory_end = (u32) reg->base + reg->size;
 		if ((memory_start <= (u32)_text) &&
 			((u32)_text <= memory_end)) {
 			memory_size = memory_end - memory_start;
@@ -142,12 +142,10 @@ void __init setup_memory(void)
 	free_bootmem(memory_start, memory_size);
 
 	/* reserve allocate blocks */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		pr_debug("reserved %d - 0x%08x-0x%08x\n", i,
-			(u32) memblock.reserved.region[i].base,
-			(u32) memblock_size_bytes(&memblock.reserved, i));
-		reserve_bootmem(memblock.reserved.region[i].base,
-			memblock_size_bytes(&memblock.reserved, i) - 1, BOOTMEM_DEFAULT);
+	for_each_memblock(reserved, reg) {
+		pr_debug("reserved - 0x%08x-0x%08x\n",
+			 (u32) reg->base, (u32) reg->size);
+		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
 	}
 #ifdef CONFIG_MMU
 	init_bootmem_done = 1;
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h
index 3c29728b56b1..43efc345065e 100644
--- a/arch/powerpc/include/asm/memblock.h
+++ b/arch/powerpc/include/asm/memblock.h
@@ -5,11 +5,4 @@
 
 #define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
 
-#ifdef CONFIG_PPC32
-extern phys_addr_t lowmem_end_addr;
-#define MEMBLOCK_REAL_LIMIT	lowmem_end_addr
-#else
-#define MEMBLOCK_REAL_LIMIT	0
-#endif
-
 #endif /* _ASM_POWERPC_MEMBLOCK_H */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 7ebf42ed84a2..bb40a06d3b77 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -2,6 +2,8 @@
 #define _ASM_POWERPC_MMU_H_
 #ifdef __KERNEL__
 
+#include <linux/types.h>
+
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>
 
@@ -82,6 +84,16 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 extern void early_init_mmu(void);
 extern void early_init_mmu_secondary(void);
 
+extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				       phys_addr_t first_memblock_size);
+
+#ifdef CONFIG_PPC64
+/* This is our real memory area size on ppc64 server, on embedded, we
+ * make it match the size of our bolted TLB area
+ */
+extern u64 ppc64_rma_size;
+#endif /* CONFIG_PPC64 */
+
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index a90625f9b485..8278e8bad5a0 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -923,11 +923,7 @@ initial_mmu:
 	mtspr	SPRN_PID,r0
 	sync
 
-	/* Configure and load two entries into TLB slots 62 and 63.
-	 * In case we are pinning TLBs, these are reserved in by the
-	 * other TLB functions.  If not reserving, then it doesn't
-	 * matter where they are loaded.
-	 */
+	/* Configure and load one entry into TLB slot 63 */
 	clrrwi	r4,r4,10		/* Mask off the real page number */
 	ori	r4,r4,(TLB_WR | TLB_EX)	/* Set the write and execute bits */
 
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index d0a26f1770fe..a4e72159234f 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -127,7 +127,7 @@ void __init allocate_pacas(void)
 	 * the first segment. On iSeries they must be within the area mapped
 	 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
 	 */
-	limit = min(0x10000000ULL, memblock.rmo_size);
+	limit = min(0x10000000ULL, ppc64_rma_size);
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		limit = min(limit, HvPagesToMap * HVPAGESIZE);
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fed9bf6187d1..c3c6a8857544 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -66,6 +66,7 @@
 int __initdata iommu_is_off;
 int __initdata iommu_force_on;
 unsigned long tce_alloc_start, tce_alloc_end;
+u64 ppc64_rma_size;
 #endif
 
 static int __init early_parse_mem(char *p)
@@ -98,7 +99,7 @@ static void __init move_device_tree(void)
 
 	if ((memory_limit && (start + size) > memory_limit) ||
 			overlaps_crashkernel(start, size)) {
-		p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size));
+		p = __va(memblock_alloc(size, PAGE_SIZE));
 		memcpy(p, initial_boot_params, size);
 		initial_boot_params = (struct boot_param_header *)p;
 		DBG("Moved device tree to 0x%p\n", p);
@@ -492,7 +493,7 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node,
 
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-#if defined(CONFIG_PPC64)
+#ifdef CONFIG_PPC64
 	if (iommu_is_off) {
 		if (base >= 0x80000000ul)
 			return;
@@ -501,9 +502,13 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 	}
 #endif
 
-	memblock_add(base, size);
-
+	/* First MEMBLOCK added, do some special initializations */
+	if (memstart_addr == ~(phys_addr_t)0)
+		setup_initial_memory_limit(base, size);
 	memstart_addr = min((u64)memstart_addr, base);
+
+	/* Add the chunk to the MEMBLOCK list */
+	memblock_add(base, size);
 }
 
 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -655,7 +660,6 @@ static void __init phyp_dump_reserve_mem(void)
 static inline void __init phyp_dump_reserve_mem(void) {}
 #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
 
-
 void __init early_init_devtree(void *params)
 {
 	phys_addr_t limit;
@@ -683,6 +687,7 @@ void __init early_init_devtree(void *params)
 
 	/* Scan memory nodes and rebuild MEMBLOCKs */
 	memblock_init();
+
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
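The reordering above works because memstart_addr is presumably initialised to an all-ones sentinel, so the limit setup runs exactly once, on the very first memory chunk the flattened device tree reports. A sketch of that assumption (the function name is hypothetical; the body mirrors the hunk above):

/* Assumed initial value, defined elsewhere in arch/powerpc/mm: */
phys_addr_t memstart_addr = (phys_addr_t)~0ull;

void __init example_add_memory_chunk(u64 base, u64 size)
{
	if (memstart_addr == ~(phys_addr_t)0)		/* still the sentinel? */
		setup_initial_memory_limit(base, size);	/* fires only once */
	memstart_addr = min((u64)memstart_addr, base);
	memblock_add(base, size);
}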
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 41048de3c6c3..7333fdbf857b 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -969,7 +969,7 @@ void __init rtas_initialize(void)
 	 */
 #ifdef CONFIG_PPC64
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
-		rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX);
+		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
 		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
 	}
 #endif
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 93666f9cabf1..b86111fe9257 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -246,7 +246,7 @@ static void __init irqstack_early_init(void)
 	unsigned int i;
 
 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
-	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
+	 * as the memblock is limited to lowmem by default */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
 			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e72690ec9b87..2a178b0ebcdf 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -486,7 +486,7 @@ static void __init emergency_stack_init(void)
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(slb0_limit(), memblock.rmo_size);
+	limit = min(slb0_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
 		unsigned long sp;
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 1dc2fa5ce1bd..5810967511d4 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -47,6 +48,7 @@
 #include <asm/bootx.h>
 #include <asm/machdep.h>
 #include <asm/setup.h>
+
 #include "mmu_decl.h"
 
 extern int __map_without_ltlbs;
@@ -139,8 +141,19 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	 * coverage with normal-sized pages (or other reasons) do not
 	 * attempt to allocate outside the allowed range.
 	 */
-
-	__initial_memory_limit_addr = memstart_addr + mapped;
+	memblock_set_current_limit(mapped);
 
 	return mapped;
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 40x can only access 16MB at the moment (see head_40x.S) */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
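This file shows both halves of the new scheme: mmu_mapin_ram() raises the limit as more of the linear mapping becomes usable, and setup_initial_memory_limit() seeds it from the first MEMBLOCK. Together they replace the static plumbing retired earlier in this patch (__initial_memory_limit_addr and the per-arch MEMBLOCK_REAL_LIMIT). A sketch of the assumed core mechanism, which this 'arch'-only diff never shows directly:

/* Sketch: assumed allocator-limit knob in the memblock core. */
struct memblock {
	phys_addr_t current_limit;	/* ceiling for memblock_alloc() */
	/* ... memory and reserved region lists ... */
};
extern struct memblock memblock;

void memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

/* memblock_alloc(size, align) is then presumed to behave like
 * memblock_alloc_base(size, align, memblock.current_limit), while
 * passing MEMBLOCK_ALLOC_ANYWHERE lifts the restriction, as the
 * htab_initialize() hunk further down uses it. */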
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index d8c6efb32bc6..024acab588fd 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -24,6 +24,8 @@
  */
 
 #include <linux/init.h>
+#include <linux/memblock.h>
+
 #include <asm/mmu.h>
 #include <asm/system.h>
 #include <asm/page.h>
@@ -213,6 +215,18 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	return total_lowmem;
 }
 
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 44x has a 256M TLB entry pinned at boot */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE));
+}
+
 #ifdef CONFIG_SMP
 void __cpuinit mmu_init_secondary(int cpu)
 {
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 4b66a1ece6d8..cde270847e7c 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -40,6 +40,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -213,5 +214,14 @@ void __init adjust_total_lowmem(void)
 	pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
 	        (unsigned int)((total_lowmem - __max_low_memory) >> 20));
 
-	__initial_memory_limit_addr = memstart_addr + __max_low_memory;
+	memblock_set_current_limit(memstart_addr + __max_low_memory);
+}
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	phys_addr_t limit = first_memblock_base + first_memblock_size;
+
+	/* 64M mapped initially according to head_fsl_booke.S */
+	memblock_set_current_limit(min_t(u64, limit, 0x04000000));
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 09dffe6efa46..83f534d862db 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -588,7 +588,7 @@ static void __init htab_initialize(void)
 	unsigned long pteg_count;
 	unsigned long prot;
 	unsigned long base = 0, size = 0, limit;
-	int i;
+	struct memblock_region *reg;
 
 	DBG(" -> htab_initialize()\n");
 
@@ -625,7 +625,7 @@ static void __init htab_initialize(void)
 		if (machine_is(cell))
 			limit = 0x80000000;
 		else
-			limit = 0;
+			limit = MEMBLOCK_ALLOC_ANYWHERE;
 
 		table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
@@ -649,7 +649,7 @@ static void __init htab_initialize(void)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
-						    1, memblock.rmo_size));
+						    1, ppc64_rma_size));
 	memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -659,9 +659,9 @@ static void __init htab_initialize(void)
 	 */
 
 	/* create bolted the linear mapping in the hash table */
-	for (i=0; i < memblock.memory.cnt; i++) {
-		base = (unsigned long)__va(memblock.memory.region[i].base);
-		size = memblock.memory.region[i].size;
+	for_each_memblock(memory, reg) {
+		base = (unsigned long)__va(reg->base);
+		size = reg->size;
 
 		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 		    base, size, prot);
@@ -696,7 +696,8 @@ static void __init htab_initialize(void)
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
 				prot, mmu_linear_psize, mmu_kernel_ssize));
 	}
+	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 
 	/*
 	 * If we have a memory_limit and we've allocated TCEs then we need to
@@ -1247,3 +1248,23 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	local_irq_restore(flags);
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* On LPAR systems, the first entry is our RMA region,
+	 * non-LPAR 64-bit hash MMU systems don't have a limitation
+	 * on real mode access, but using the first entry works well
+	 * enough. We also clamp it to 1G to avoid some funky things
+	 * such as RTAS bugs etc...
+	 */
+	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+	/* Finally limit subsequent allocations */
+	memblock_set_current_limit(ppc64_rma_size);
+}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 6a6975dc2654..742da43b4ab6 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -92,12 +92,6 @@ int __allow_ioremap_reserved;
 unsigned long __max_low_memory = MAX_LOW_MEM;
 
 /*
- * address of the limit of what is accessible with initial MMU setup -
- * 256MB usually, but only 16MB on 601.
- */
-phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
-
-/*
  * Check for command-line options that affect what MMU_init will do.
  */
 void MMU_setup(void)
@@ -126,13 +120,6 @@ void __init MMU_init(void)
 	if (ppc_md.progress)
 		ppc_md.progress("MMU:enter", 0x111);
 
-	/* 601 can only access 16MB at the moment */
-	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
-		__initial_memory_limit_addr = 0x01000000;
-	/* 8xx can only access 8MB at the moment */
-	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
-		__initial_memory_limit_addr = 0x00800000;
-
 	/* parse args from command line */
 	MMU_setup();
 
@@ -190,20 +177,18 @@ void __init MMU_init(void)
 #ifdef CONFIG_BOOTX_TEXT
 	btext_unmap();
 #endif
+
+	/* Shortly after that, the entire linear mapping will be available */
+	memblock_set_current_limit(lowmem_end_addr);
 }
 
 /* This is only called until mem_init is done. */
 void __init *early_get_page(void)
 {
-	void *p;
-
-	if (init_bootmem_done) {
-		p = alloc_bootmem_pages(PAGE_SIZE);
-	} else {
-		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-					__initial_memory_limit_addr));
-	}
-	return p;
+	if (init_bootmem_done)
+		return alloc_bootmem_pages(PAGE_SIZE);
+	else
+		return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
 }
 
 /* Free up now-unused memory */
@@ -252,3 +237,17 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+
+#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 8xx can only access 8MB at the moment */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
+#endif /* CONFIG_8xx */
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index ace85fa74b29..6374b2196a17 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -330,3 +330,4 @@ int __meminit vmemmap_populate(struct page *start_page,
 	return 0;
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1a84a8d00005..f661f6c527da 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -82,18 +82,11 @@ int page_is_ram(unsigned long pfn)
 	return pfn < max_pfn;
 #else
 	unsigned long paddr = (pfn << PAGE_SHIFT);
-	int i;
-	for (i=0; i < memblock.memory.cnt; i++) {
-		unsigned long base;
+	struct memblock_region *reg;
 
-		base = memblock.memory.region[i].base;
-
-		if ((paddr >= base) &&
-			(paddr < (base + memblock.memory.region[i].size))) {
+	for_each_memblock(memory, reg)
+		if (paddr >= reg->base && paddr < (reg->base + reg->size))
 			return 1;
-		}
-	}
-
 	return 0;
 #endif
 }
@@ -149,23 +142,19 @@ int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-	struct memblock_property res;
-	unsigned long pfn, len;
-	u64 end;
+	struct memblock_region *reg;
+	unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long tstart, tend;
 	int ret = -1;
 
-	res.base = (u64) start_pfn << PAGE_SHIFT;
-	res.size = (u64) nr_pages << PAGE_SHIFT;
-
-	end = res.base + res.size - 1;
-	while ((res.base < end) && (memblock_find(&res) >= 0)) {
-		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
-		len = (unsigned long)(res.size >> PAGE_SHIFT);
-		ret = (*func)(pfn, len, arg);
+	for_each_memblock(memory, reg) {
+		tstart = max(start_pfn, memblock_region_base_pfn(reg));
+		tend = min(end_pfn, memblock_region_end_pfn(reg));
+		if (tstart >= tend)
+			continue;
+		ret = (*func)(tstart, tend - tstart, arg);
 		if (ret)
 			break;
-		res.base += (res.size + 1);
-		res.size = (end - res.base + 1);
 	}
 	return ret;
 }
@@ -179,9 +168,9 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range);
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init do_init_bootmem(void)
 {
-	unsigned long i;
 	unsigned long start, bootmap_pages;
 	unsigned long total_pages;
+	struct memblock_region *reg;
 	int boot_mapsize;
 
 	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
@@ -204,10 +193,10 @@ void __init do_init_bootmem(void)
 	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
-	for (i = 0; i < memblock.memory.cnt; i++) {
+	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+		start_pfn = memblock_region_base_pfn(reg);
+		end_pfn = memblock_region_end_pfn(reg);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -218,29 +207,21 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long addr = memblock.reserved.region[i].base +
-				     memblock_size_bytes(&memblock.reserved, i) - 1;
-		if (addr < lowmem_end_addr)
-			reserve_bootmem(memblock.reserved.region[i].base,
-					memblock_size_bytes(&memblock.reserved, i),
-					BOOTMEM_DEFAULT);
-		else if (memblock.reserved.region[i].base < lowmem_end_addr) {
-			unsigned long adjusted_size = lowmem_end_addr -
-				      memblock.reserved.region[i].base;
-			reserve_bootmem(memblock.reserved.region[i].base,
-					adjusted_size, BOOTMEM_DEFAULT);
+	for_each_memblock(reserved, reg) {
+		unsigned long top = reg->base + reg->size - 1;
+		if (top < lowmem_end_addr)
+			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+		else if (reg->base < lowmem_end_addr) {
+			unsigned long trunc_size = lowmem_end_addr - reg->base;
+			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
 		}
 	}
 #else
 	free_bootmem_with_active_regions(0, max_pfn);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < memblock.reserved.cnt; i++)
-		reserve_bootmem(memblock.reserved.region[i].base,
-				memblock_size_bytes(&memblock.reserved, i),
-				BOOTMEM_DEFAULT);
-
+	for_each_memblock(reserved, reg)
+		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
 #endif
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
@@ -251,22 +232,15 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-	unsigned long memblock_next_region_start_pfn,
-		      memblock_region_max_pfn;
-	int i;
-
-	for (i = 0; i < memblock.memory.cnt - 1; i++) {
-		memblock_region_max_pfn =
-			(memblock.memory.region[i].base >> PAGE_SHIFT) +
-			(memblock.memory.region[i].size >> PAGE_SHIFT);
-		memblock_next_region_start_pfn =
-			memblock.memory.region[i+1].base >> PAGE_SHIFT;
-
-		if (memblock_region_max_pfn < memblock_next_region_start_pfn)
-			register_nosave_region(memblock_region_max_pfn,
-					       memblock_next_region_start_pfn);
+	struct memblock_region *reg, *prev = NULL;
+
+	for_each_memblock(memory, reg) {
+		if (prev &&
+		    memblock_region_end_pfn(prev) < memblock_region_base_pfn(reg))
+			register_nosave_region(memblock_region_end_pfn(prev),
+					       memblock_region_base_pfn(reg));
+		prev = reg;
 	}
-
 	return 0;
 }
 
@@ -327,7 +301,7 @@ void __init mem_init(void)
 		swiotlb_init(1);
 #endif
 
-	num_physpages = memblock.memory.size >> PAGE_SHIFT;
+	num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
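The rewritten walk_system_ram_range() intersects the request [start_pfn, end_pfn) with each memory region and only invokes the callback on the overlap. A standalone illustration of that clamping, using hypothetical region and request values that are not taken from the patch:

#include <stdio.h>

struct region { unsigned long base_pfn, end_pfn; };

int main(void)
{
	/* Hypothetical memory map: two regions with a hole between them. */
	struct region regs[] = { { 0x000, 0x100 }, { 0x200, 0x300 } };
	unsigned long start_pfn = 0x080, end_pfn = 0x280;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned long tstart = start_pfn > regs[i].base_pfn ?
				       start_pfn : regs[i].base_pfn;
		unsigned long tend = end_pfn < regs[i].end_pfn ?
				     end_pfn : regs[i].end_pfn;

		if (tstart >= tend)
			continue;	/* no overlap with this region */
		/* the kernel would call (*func)(tstart, tend - tstart, arg) */
		printf("callback(start=%#lx, nr_pages=%#lx)\n",
		       tstart, tend - tstart);
	}
	return 0;	/* reports the 0x80..0x100 and 0x200..0x280 overlaps */
}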
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 002878ccf90b..066fb443ba5a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -802,16 +802,17 @@ static void __init setup_nonnuma(void)
 	unsigned long top_of_ram = memblock_end_of_DRAM();
 	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
-	unsigned int i, nid = 0;
+	unsigned int nid = 0;
+	struct memblock_region *reg;
 
 	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	for (i = 0; i < memblock.memory.cnt; ++i) {
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
+		start_pfn = memblock_region_base_pfn(reg);
+		end_pfn = memblock_region_end_pfn(reg);
 
 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
@@ -947,11 +948,11 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
 static void mark_reserved_regions_for_nid(int nid)
 {
 	struct pglist_data *node = NODE_DATA(nid);
-	int i;
+	struct memblock_region *reg;
 
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long physbase = memblock.reserved.region[i].base;
-		unsigned long size = memblock.reserved.region[i].size;
+	for_each_memblock(reserved, reg) {
+		unsigned long physbase = reg->base;
+		unsigned long size = reg->size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index f8a01829d64f..11571e118831 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -223,8 +223,7 @@ void __init MMU_init_hw(void)
 	 * Find some memory for the hash table.
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
-				   __initial_memory_limit_addr));
+	Hash = __va(memblock_alloc(Hash_size, Hash_size));
 	cacheable_memzero(Hash, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
 
@@ -272,3 +271,18 @@ void __init MMU_init_hw(void)
 
 	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 601 can only access 16MB at the moment */
+	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
+	else /* Anything else has 256M mapped */
+		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
+}
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index fe391e942521..6a0f20c25469 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -509,6 +509,8 @@ static void __early_init_mmu(int boot_cpu)
 	 * the MMU configuration
 	 */
 	mb();
+
+	memblock_set_current_limit(linear_map_top);
 }
 
 void __init early_init_mmu(void)
@@ -521,4 +523,18 @@ void __cpuinit early_init_mmu_secondary(void)
 	__early_init_mmu(0);
 }
 
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* On Embedded 64-bit, we adjust the RMA size to match
+	 * the bolted TLB entry. We know for now that only 1G
+	 * entries are supported though that may eventually
+	 * change. We crop it to the size of the first MEMBLOCK to
+	 * avoid going over total available memory just in case...
+	 */
+	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+	/* Finally limit subsequent allocations */
+	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
+}
 #endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 5cdcc7c8d973..8450c29e9b2f 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -65,7 +65,7 @@ static int __init page_aligned(unsigned long x)
 
 void __init wii_memory_fixups(void)
 {
-	struct memblock_property *p = memblock.memory.region;
+	struct memblock_region *p = memblock.memory.region;
 
 	/*
 	 * This is part of a workaround to allow the use of two
diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h
index dfe683b88075..e87063fad2ea 100644
--- a/arch/sh/include/asm/memblock.h
+++ b/arch/sh/include/asm/memblock.h
@@ -1,6 +1,4 @@
 #ifndef __ASM_SH_MEMBLOCK_H
 #define __ASM_SH_MEMBLOCK_H
 
-#define MEMBLOCK_REAL_LIMIT	0
-
 #endif /* __ASM_SH_MEMBLOCK_H */
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d0e249100e98..b977475f7446 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -200,7 +200,6 @@ static void __init bootmem_init_one_node(unsigned int nid)
 	unsigned long total_pages, paddr;
 	unsigned long end_pfn;
 	struct pglist_data *p;
-	int i;
 
 	p = NODE_DATA(nid);
 
@@ -226,11 +225,12 @@ static void __init bootmem_init_one_node(unsigned int nid)
 	 * reservations in other nodes.
 	 */
 	if (nid == 0) {
+		struct memblock_region *reg;
+
 		/* Reserve the sections we're already using. */
-		for (i = 0; i < memblock.reserved.cnt; i++)
-			reserve_bootmem(memblock.reserved.region[i].base,
-					memblock_size_bytes(&memblock.reserved, i),
-					BOOTMEM_DEFAULT);
+		for_each_memblock(reserved, reg) {
+			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+		}
 	}
 
 	sparse_memory_present_with_active_regions(nid);
@@ -238,13 +238,14 @@ static void __init bootmem_init_one_node(unsigned int nid)
 
 static void __init do_init_bootmem(void)
 {
+	struct memblock_region *reg;
 	int i;
 
 	/* Add active regions with valid PFNs. */
-	for (i = 0; i < memblock.memory.cnt; i++) {
+	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+		start_pfn = memblock_region_base_pfn(reg);
+		end_pfn = memblock_region_end_pfn(reg);
 		__add_active_range(0, start_pfn, end_pfn);
 	}
 
diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h
index f12af880649b..c67b047ef85e 100644
--- a/arch/sparc/include/asm/memblock.h
+++ b/arch/sparc/include/asm/memblock.h
@@ -5,6 +5,4 @@
 
 #define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
 
-#define MEMBLOCK_REAL_LIMIT	0
-
 #endif /* !(_SPARC64_MEMBLOCK_H) */
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f0434513df15..dc584d26d597 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -785,8 +785,7 @@ static int find_node(unsigned long addr)
 	return -1;
 }
 
-static unsigned long long nid_range(unsigned long long start,
-				    unsigned long long end, int *nid)
+u64 memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = find_node(start);
 	start += PAGE_SIZE;
@@ -804,8 +803,7 @@ static unsigned long long nid_range(unsigned long long start,
 	return start;
 }
 #else
-static unsigned long long nid_range(unsigned long long start,
-				    unsigned long long end, int *nid)
+u64 memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = 0;
 	return end;
@@ -822,8 +820,7 @@ static void __init allocate_node_data(int nid)
 	struct pglist_data *p;
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-	paddr = memblock_alloc_nid(sizeof(struct pglist_data),
-			      SMP_CACHE_BYTES, nid, nid_range);
+	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
 	if (!paddr) {
 		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
 		prom_halt();
@@ -843,8 +840,7 @@ static void __init allocate_node_data(int nid)
 	if (p->node_spanned_pages) {
 		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
-		paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
-				      nid_range);
+		paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid);
 		if (!paddr) {
 			prom_printf("Cannot allocate bootmap for nid[%d]\n",
 				    nid);
@@ -972,19 +968,19 @@ int of_node_to_nid(struct device_node *dp)
 
 static void __init add_node_ranges(void)
 {
-	int i;
+	struct memblock_region *reg;
 
-	for (i = 0; i < memblock.memory.cnt; i++) {
-		unsigned long size = memblock_size_bytes(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
+		unsigned long size = reg->size;
 		unsigned long start, end;
 
-		start = memblock.memory.region[i].base;
+		start = reg->base;
 		end = start + size;
 		while (start < end) {
 			unsigned long this_end;
 			int nid;
 
-			this_end = nid_range(start, end, &nid);
+			this_end = memblock_nid_range(start, end, &nid);
 
 			numadbg("Adding active range nid[%d] "
 				"start[%lx] end[%lx]\n",
@@ -1281,7 +1277,7 @@ static void __init bootmem_init_nonnuma(void)
 {
 	unsigned long top_of_ram = memblock_end_of_DRAM();
 	unsigned long total_ram = memblock_phys_mem_size();
-	unsigned int i;
+	struct memblock_region *reg;
 
 	numadbg("bootmem_init_nonnuma()\n");
 
@@ -1292,15 +1288,14 @@ static void __init bootmem_init_nonnuma(void)
 
 	init_node_masks_nonnuma();
 
-	for (i = 0; i < memblock.memory.cnt; i++) {
-		unsigned long size = memblock_size_bytes(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
 
-		if (!size)
+		if (!reg->size)
 			continue;
 
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+		start_pfn = memblock_region_base_pfn(reg);
+		end_pfn = memblock_region_end_pfn(reg);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -1318,7 +1313,7 @@ static void __init reserve_range_in_node(int nid, unsigned long start,
 		unsigned long this_end;
 		int n;
 
-		this_end = nid_range(start, end, &n);
+		this_end = memblock_nid_range(start, end, &n);
 		if (n == nid) {
 			numadbg(" MATCH reserving range [%lx:%lx]\n",
 				start, this_end);
@@ -1334,17 +1329,12 @@ static void __init reserve_range_in_node(int nid, unsigned long start,
 
 static void __init trim_reserved_in_node(int nid)
 {
-	int i;
+	struct memblock_region *reg;
 
 	numadbg("  trim_reserved_in_node(%d)\n", nid);
 
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long start = memblock.reserved.region[i].base;
-		unsigned long size = memblock_size_bytes(&memblock.reserved, i);
-		unsigned long end = start + size;
-
-		reserve_range_in_node(nid, start, end);
-	}
+	for_each_memblock(reserved, reg)
+		reserve_range_in_node(nid, reg->base, reg->base + reg->size);
 }
 
 static void __init bootmem_init_one_node(int nid)
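nid_range() stops being a file-local callback threaded through memblock_alloc_nid() and becomes an override of a generic hook consumed by memblock_alloc_try_nid(). The core presumably declares a weak default with the same trivial behaviour the old !NUMA branch had; a sketch of that assumption (the real declaration would live in the memblock core, not in this diff):

/* Sketch: assumed __weak default in the memblock core, which the
 * sparc definitions above override. */
u64 __weak memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = 0;	/* no topology knowledge: everything on node 0 */
	return end;	/* so the whole [start, end) range is one chunk */
}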
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cea0cd9a316f..01a572992fa0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -27,6 +27,7 @@ config X86
 	select HAVE_PERF_EVENTS if (!M386 && !M486)
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
+	select HAVE_MEMBLOCK
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_FRAME_POINTERS
 	select HAVE_DMA_ATTRS
@@ -193,9 +194,6 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y
 
-config HAVE_EARLY_RES
-	def_bool y
-
 config HAVE_INTEL_TXT
 	def_bool y
 	depends on EXPERIMENTAL && DMAR && ACPI
@@ -590,16 +588,7 @@ config PARAVIRT_DEBUG
 	  a paravirt_op is missing when it is called.
 
 config NO_BOOTMEM
-	default y
-	bool "Disable Bootmem code"
-	---help---
-	  Use early_res directly instead of bootmem before slab is ready.
-		- allocator (buddy) [generic]
-		- early allocator (bootmem) [generic]
-		- very early allocator (reserve_early*()) [x86]
-		- very very early allocator (early brk model) [x86]
-	  So reduce one layer between early allocator to final allocator
-
+	def_bool y
 
 config MEMTEST
 	bool "Memtest"
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index ec8a52d14ab1..5be1542fbfaf 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -112,23 +112,13 @@ static inline void early_memtest(unsigned long start, unsigned long end)
 }
 #endif
 
-extern unsigned long end_user_pfn;
-
-extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
-extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
-#include <linux/early_res.h>
-
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
-extern int e820_find_active_region(const struct e820entry *ei,
-				   unsigned long start_pfn,
-				   unsigned long last_pfn,
-				   unsigned long *ei_startpfn,
-				   unsigned long *ei_endpfn);
-extern void e820_register_active_regions(int nid, unsigned long start_pfn,
-					 unsigned long end_pfn);
-extern u64 e820_hole_size(u64 start, u64 end);
+extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+
+void memblock_x86_fill(void);
+void memblock_find_dma_reserve(void);
+
 extern void finish_e820_parsing(void);
 extern void e820_reserve_resources(void);
 extern void e820_reserve_resources_late(void);
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 8406ed7f9926..8e4a16508d4e 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -90,7 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, | |||
90 | #endif /* CONFIG_X86_32 */ | 90 | #endif /* CONFIG_X86_32 */ |
91 | 91 | ||
92 | extern int add_efi_memmap; | 92 | extern int add_efi_memmap; |
93 | extern void efi_reserve_early(void); | 93 | extern void efi_memblock_x86_reserve_range(void); |
94 | extern void efi_call_phys_prelog(void); | 94 | extern void efi_call_phys_prelog(void); |
95 | extern void efi_call_phys_epilog(void); | 95 | extern void efi_call_phys_epilog(void); |
96 | 96 | ||
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h new file mode 100644 index 000000000000..2c304bb6e072 --- /dev/null +++ b/arch/x86/include/asm/memblock.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef _X86_MEMBLOCK_H | ||
2 | #define _X86_MEMBLOCK_H | ||
3 | |||
4 | #define ARCH_DISCARD_MEMBLOCK | ||
5 | |||
6 | u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); | ||
7 | void memblock_x86_to_bootmem(u64 start, u64 end); | ||
8 | |||
9 | void memblock_x86_reserve_range(u64 start, u64 end, char *name); | ||
10 | void memblock_x86_free_range(u64 start, u64 end); | ||
11 | struct range; | ||
12 | int get_free_all_memory_range(struct range **rangep, int nodeid); | ||
13 | |||
14 | void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, | ||
15 | unsigned long last_pfn); | ||
16 | u64 memblock_x86_hole_size(u64 start, u64 end); | ||
17 | u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); | ||
18 | u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); | ||
19 | u64 memblock_x86_memory_in_range(u64 addr, u64 limit); | ||
20 | |||
21 | #endif | ||
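The new header is a thin x86 veneer over the core memblock allocator. Call sites throughout the rest of the patch pair the primitives the same way: locate a candidate with memblock_find_in_range(), test against the MEMBLOCK_ERROR sentinel, then pin the range with memblock_x86_reserve_range(). A sketch of that idiom (the 1MB bound and the "EXAMPLE" tag are placeholders, not from the patch):

#include <linux/memblock.h>
#include <asm/memblock.h>

/* Sketch of the find/check/reserve pattern used by the call sites below. */
static phys_addr_t __init example_reserve_low(u64 size, u64 align)
{
	u64 mem = memblock_find_in_range(0, 1<<20, size, align);

	if (mem == MEMBLOCK_ERROR)
		return 0;			/* caller handles failure */

	memblock_x86_reserve_range(mem, mem + size, "EXAMPLE");
	return mem;
}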
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 33cec152070d..e1252074ea40 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/acpi.h> | 8 | #include <linux/acpi.h> |
9 | #include <linux/bootmem.h> | 9 | #include <linux/bootmem.h> |
10 | #include <linux/memblock.h> | ||
10 | #include <linux/dmi.h> | 11 | #include <linux/dmi.h> |
11 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
12 | #include <asm/segment.h> | 13 | #include <asm/segment.h> |
@@ -125,7 +126,7 @@ void acpi_restore_state_mem(void) | |||
125 | */ | 126 | */ |
126 | void __init acpi_reserve_wakeup_memory(void) | 127 | void __init acpi_reserve_wakeup_memory(void) |
127 | { | 128 | { |
128 | unsigned long mem; | 129 | phys_addr_t mem; |
129 | 130 | ||
130 | if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) { | 131 | if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) { |
131 | printk(KERN_ERR | 132 | printk(KERN_ERR |
@@ -133,15 +134,15 @@ void __init acpi_reserve_wakeup_memory(void) | |||
133 | return; | 134 | return; |
134 | } | 135 | } |
135 | 136 | ||
136 | mem = find_e820_area(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); | 137 | mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); |
137 | 138 | ||
138 | if (mem == -1L) { | 139 | if (mem == MEMBLOCK_ERROR) { |
139 | printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); | 140 | printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); |
140 | return; | 141 | return; |
141 | } | 142 | } |
142 | acpi_realmode = (unsigned long) phys_to_virt(mem); | 143 | acpi_realmode = (unsigned long) phys_to_virt(mem); |
143 | acpi_wakeup_address = mem; | 144 | acpi_wakeup_address = mem; |
144 | reserve_early(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); | 145 | memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); |
145 | } | 146 | } |
146 | 147 | ||
147 | 148 | ||
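Note the type change above from unsigned long to phys_addr_t: on 32-bit x86 with PAE, physical addresses can be wider than unsigned long, so memblock results must be carried in phys_addr_t (or u64). A hypothetical illustration of the hazard:

#include <linux/memblock.h>

/* Hypothetical: with PAE, phys_addr_t is 64-bit while unsigned long is
 * 32-bit, so the assignment below would truncate an address above 4GB. */
static void __init type_width_example(void)
{
	phys_addr_t ok = memblock_find_in_range(0, ~0ULL, PAGE_SIZE, PAGE_SIZE);
	unsigned long bad = ok;		/* truncates on i386 PAE: don't do this */
	(void)bad;
}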
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index 3e28401f161c..960f26ab5c9f 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/nodemask.h> | 26 | #include <linux/nodemask.h> |
27 | #include <linux/topology.h> | 27 | #include <linux/topology.h> |
28 | #include <linux/bootmem.h> | 28 | #include <linux/bootmem.h> |
29 | #include <linux/memblock.h> | ||
29 | #include <linux/threads.h> | 30 | #include <linux/threads.h> |
30 | #include <linux/cpumask.h> | 31 | #include <linux/cpumask.h> |
31 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
@@ -88,7 +89,7 @@ static inline void numaq_register_node(int node, struct sys_cfg_data *scd) | |||
88 | node_end_pfn[node] = | 89 | node_end_pfn[node] = |
89 | MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); | 90 | MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); |
90 | 91 | ||
91 | e820_register_active_regions(node, node_start_pfn[node], | 92 | memblock_x86_register_active_regions(node, node_start_pfn[node], |
92 | node_end_pfn[node]); | 93 | node_end_pfn[node]); |
93 | 94 | ||
94 | memory_present(node, node_start_pfn[node], node_end_pfn[node]); | 95 | memory_present(node, node_start_pfn[node], node_end_pfn[node]); |
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index fc999e6fc46a..13a389179514 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c | |||
@@ -2,7 +2,8 @@ | |||
2 | #include <linux/sched.h> | 2 | #include <linux/sched.h> |
3 | #include <linux/kthread.h> | 3 | #include <linux/kthread.h> |
4 | #include <linux/workqueue.h> | 4 | #include <linux/workqueue.h> |
5 | #include <asm/e820.h> | 5 | #include <linux/memblock.h> |
6 | |||
6 | #include <asm/proto.h> | 7 | #include <asm/proto.h> |
7 | 8 | ||
8 | /* | 9 | /* |
@@ -18,10 +19,12 @@ static int __read_mostly memory_corruption_check = -1; | |||
18 | static unsigned __read_mostly corruption_check_size = 64*1024; | 19 | static unsigned __read_mostly corruption_check_size = 64*1024; |
19 | static unsigned __read_mostly corruption_check_period = 60; /* seconds */ | 20 | static unsigned __read_mostly corruption_check_period = 60; /* seconds */ |
20 | 21 | ||
21 | static struct e820entry scan_areas[MAX_SCAN_AREAS]; | 22 | static struct scan_area { |
23 | u64 addr; | ||
24 | u64 size; | ||
25 | } scan_areas[MAX_SCAN_AREAS]; | ||
22 | static int num_scan_areas; | 26 | static int num_scan_areas; |
23 | 27 | ||
24 | |||
25 | static __init int set_corruption_check(char *arg) | 28 | static __init int set_corruption_check(char *arg) |
26 | { | 29 | { |
27 | char *end; | 30 | char *end; |
@@ -81,9 +84,9 @@ void __init setup_bios_corruption_check(void) | |||
81 | 84 | ||
82 | while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) { | 85 | while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) { |
83 | u64 size; | 86 | u64 size; |
84 | addr = find_e820_area_size(addr, &size, PAGE_SIZE); | 87 | addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE); |
85 | 88 | ||
86 | if (!(addr + 1)) | 89 | if (addr == MEMBLOCK_ERROR) |
87 | break; | 90 | break; |
88 | 91 | ||
89 | if (addr >= corruption_check_size) | 92 | if (addr >= corruption_check_size) |
@@ -92,7 +95,7 @@ void __init setup_bios_corruption_check(void) | |||
92 | if ((addr + size) > corruption_check_size) | 95 | if ((addr + size) > corruption_check_size) |
93 | size = corruption_check_size - addr; | 96 | size = corruption_check_size - addr; |
94 | 97 | ||
95 | e820_update_range(addr, size, E820_RAM, E820_RESERVED); | 98 | memblock_x86_reserve_range(addr, addr + size, "SCAN RAM"); |
96 | scan_areas[num_scan_areas].addr = addr; | 99 | scan_areas[num_scan_areas].addr = addr; |
97 | scan_areas[num_scan_areas].size = size; | 100 | scan_areas[num_scan_areas].size = size; |
98 | num_scan_areas++; | 101 | num_scan_areas++; |
@@ -105,7 +108,6 @@ void __init setup_bios_corruption_check(void) | |||
105 | 108 | ||
106 | printk(KERN_INFO "Scanning %d areas for low memory corruption\n", | 109 | printk(KERN_INFO "Scanning %d areas for low memory corruption\n", |
107 | num_scan_areas); | 110 | num_scan_areas); |
108 | update_e820(); | ||
109 | } | 111 | } |
110 | 112 | ||
111 | 113 | ||
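setup_bios_corruption_check() above also shows the intended use of memblock_x86_find_in_range_size(): repeatedly ask for the next free range at or after a cursor, with the range's size returned through *sizep. A stripped-down sketch of that loop (the limit is a placeholder):

#include <linux/kernel.h>
#include <linux/memblock.h>
#include <asm/memblock.h>

/* Sketch: enumerate free ranges below 'limit', as the corruption
 * checker above does. */
static void __init scan_free_ranges(u64 limit)
{
	u64 addr = PAGE_SIZE;

	while (addr < limit) {
		u64 size;

		addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE);
		if (addr == MEMBLOCK_ERROR || addr >= limit)
			break;

		pr_info("free: %010llx +%llx\n", addr, size);
		addr += size;		/* advance past this range */
	}
}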
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 0d6fc71bedb1..0c2b7ef7a34d 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/pfn.h> | 15 | #include <linux/pfn.h> |
16 | #include <linux/suspend.h> | 16 | #include <linux/suspend.h> |
17 | #include <linux/firmware-map.h> | 17 | #include <linux/firmware-map.h> |
18 | #include <linux/memblock.h> | ||
18 | 19 | ||
19 | #include <asm/e820.h> | 20 | #include <asm/e820.h> |
20 | #include <asm/proto.h> | 21 | #include <asm/proto.h> |
@@ -738,73 +739,7 @@ core_initcall(e820_mark_nvs_memory); | |||
738 | #endif | 739 | #endif |
739 | 740 | ||
740 | /* | 741 | /* |
741 | * Find a free area with specified alignment in a specific range. | 742 | * pre allocated 4k and reserved it in memblock and e820_saved |
742 | */ | ||
743 | u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align) | ||
744 | { | ||
745 | int i; | ||
746 | |||
747 | for (i = 0; i < e820.nr_map; i++) { | ||
748 | struct e820entry *ei = &e820.map[i]; | ||
749 | u64 addr; | ||
750 | u64 ei_start, ei_last; | ||
751 | |||
752 | if (ei->type != E820_RAM) | ||
753 | continue; | ||
754 | |||
755 | ei_last = ei->addr + ei->size; | ||
756 | ei_start = ei->addr; | ||
757 | addr = find_early_area(ei_start, ei_last, start, end, | ||
758 | size, align); | ||
759 | |||
760 | if (addr != -1ULL) | ||
761 | return addr; | ||
762 | } | ||
763 | return -1ULL; | ||
764 | } | ||
765 | |||
766 | u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align) | ||
767 | { | ||
768 | return find_e820_area(start, end, size, align); | ||
769 | } | ||
770 | |||
771 | u64 __init get_max_mapped(void) | ||
772 | { | ||
773 | u64 end = max_pfn_mapped; | ||
774 | |||
775 | end <<= PAGE_SHIFT; | ||
776 | |||
777 | return end; | ||
778 | } | ||
779 | /* | ||
780 | * Find next free range after *start | ||
781 | */ | ||
782 | u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align) | ||
783 | { | ||
784 | int i; | ||
785 | |||
786 | for (i = 0; i < e820.nr_map; i++) { | ||
787 | struct e820entry *ei = &e820.map[i]; | ||
788 | u64 addr; | ||
789 | u64 ei_start, ei_last; | ||
790 | |||
791 | if (ei->type != E820_RAM) | ||
792 | continue; | ||
793 | |||
794 | ei_last = ei->addr + ei->size; | ||
795 | ei_start = ei->addr; | ||
796 | addr = find_early_area_size(ei_start, ei_last, start, | ||
797 | sizep, align); | ||
798 | |||
799 | if (addr != -1ULL) | ||
800 | return addr; | ||
801 | } | ||
802 | |||
803 | return -1ULL; | ||
804 | } | ||
805 | |||
806 | /* | ||
807 | * pre allocated 4k and reserved it in e820 | ||
808 | */ | 743 | */ |
809 | u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) | 744 | u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) |
810 | { | 745 | { |
@@ -813,8 +748,8 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) | |||
813 | u64 start; | 748 | u64 start; |
814 | 749 | ||
815 | for (start = startt; ; start += size) { | 750 | for (start = startt; ; start += size) { |
816 | start = find_e820_area_size(start, &size, align); | 751 | start = memblock_x86_find_in_range_size(start, &size, align); |
817 | if (!(start + 1)) | 752 | if (start == MEMBLOCK_ERROR) |
818 | return 0; | 753 | return 0; |
819 | if (size >= sizet) | 754 | if (size >= sizet) |
820 | break; | 755 | break; |
@@ -830,10 +765,9 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) | |||
830 | addr = round_down(start + size - sizet, align); | 765 | addr = round_down(start + size - sizet, align); |
831 | if (addr < start) | 766 | if (addr < start) |
832 | return 0; | 767 | return 0; |
833 | e820_update_range(addr, sizet, E820_RAM, E820_RESERVED); | 768 | memblock_x86_reserve_range(addr, addr + sizet, "new next"); |
834 | e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); | 769 | e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); |
835 | printk(KERN_INFO "update e820 for early_reserve_e820\n"); | 770 | printk(KERN_INFO "update e820_saved for early_reserve_e820\n"); |
836 | update_e820(); | ||
837 | update_e820_saved(); | 771 | update_e820_saved(); |
838 | 772 | ||
839 | return addr; | 773 | return addr; |
@@ -895,74 +829,6 @@ unsigned long __init e820_end_of_low_ram_pfn(void) | |||
895 | { | 829 | { |
896 | return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM); | 830 | return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM); |
897 | } | 831 | } |
898 | /* | ||
899 | * Finds an active region in the address range from start_pfn to last_pfn and | ||
900 | * returns its range in ei_startpfn and ei_endpfn for the e820 entry. | ||
901 | */ | ||
902 | int __init e820_find_active_region(const struct e820entry *ei, | ||
903 | unsigned long start_pfn, | ||
904 | unsigned long last_pfn, | ||
905 | unsigned long *ei_startpfn, | ||
906 | unsigned long *ei_endpfn) | ||
907 | { | ||
908 | u64 align = PAGE_SIZE; | ||
909 | |||
910 | *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT; | ||
911 | *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT; | ||
912 | |||
913 | /* Skip map entries smaller than a page */ | ||
914 | if (*ei_startpfn >= *ei_endpfn) | ||
915 | return 0; | ||
916 | |||
917 | /* Skip if map is outside the node */ | ||
918 | if (ei->type != E820_RAM || *ei_endpfn <= start_pfn || | ||
919 | *ei_startpfn >= last_pfn) | ||
920 | return 0; | ||
921 | |||
922 | /* Check for overlaps */ | ||
923 | if (*ei_startpfn < start_pfn) | ||
924 | *ei_startpfn = start_pfn; | ||
925 | if (*ei_endpfn > last_pfn) | ||
926 | *ei_endpfn = last_pfn; | ||
927 | |||
928 | return 1; | ||
929 | } | ||
930 | |||
931 | /* Walk the e820 map and register active regions within a node */ | ||
932 | void __init e820_register_active_regions(int nid, unsigned long start_pfn, | ||
933 | unsigned long last_pfn) | ||
934 | { | ||
935 | unsigned long ei_startpfn; | ||
936 | unsigned long ei_endpfn; | ||
937 | int i; | ||
938 | |||
939 | for (i = 0; i < e820.nr_map; i++) | ||
940 | if (e820_find_active_region(&e820.map[i], | ||
941 | start_pfn, last_pfn, | ||
942 | &ei_startpfn, &ei_endpfn)) | ||
943 | add_active_range(nid, ei_startpfn, ei_endpfn); | ||
944 | } | ||
945 | |||
946 | /* | ||
947 | * Find the hole size (in bytes) in the memory range. | ||
948 | * @start: starting address of the memory range to scan | ||
949 | * @end: ending address of the memory range to scan | ||
950 | */ | ||
951 | u64 __init e820_hole_size(u64 start, u64 end) | ||
952 | { | ||
953 | unsigned long start_pfn = start >> PAGE_SHIFT; | ||
954 | unsigned long last_pfn = end >> PAGE_SHIFT; | ||
955 | unsigned long ei_startpfn, ei_endpfn, ram = 0; | ||
956 | int i; | ||
957 | |||
958 | for (i = 0; i < e820.nr_map; i++) { | ||
959 | if (e820_find_active_region(&e820.map[i], | ||
960 | start_pfn, last_pfn, | ||
961 | &ei_startpfn, &ei_endpfn)) | ||
962 | ram += ei_endpfn - ei_startpfn; | ||
963 | } | ||
964 | return end - start - ((u64)ram << PAGE_SHIFT); | ||
965 | } | ||
966 | 832 | ||
967 | static void early_panic(char *msg) | 833 | static void early_panic(char *msg) |
968 | { | 834 | { |
@@ -1210,3 +1076,48 @@ void __init setup_memory_map(void) | |||
1210 | printk(KERN_INFO "BIOS-provided physical RAM map:\n"); | 1076 | printk(KERN_INFO "BIOS-provided physical RAM map:\n"); |
1211 | e820_print_map(who); | 1077 | e820_print_map(who); |
1212 | } | 1078 | } |
1079 | |||
1080 | void __init memblock_x86_fill(void) | ||
1081 | { | ||
1082 | int i; | ||
1083 | u64 end; | ||
1084 | |||
1085 | /* | ||
1086 | * EFI may have more than 128 entries | ||
1087 | * We are safe to enable resizing, beause memblock_x86_fill() | ||
1088 | * is rather later for x86 | ||
1089 | */ | ||
1090 | memblock_can_resize = 1; | ||
1091 | |||
1092 | for (i = 0; i < e820.nr_map; i++) { | ||
1093 | struct e820entry *ei = &e820.map[i]; | ||
1094 | |||
1095 | end = ei->addr + ei->size; | ||
1096 | if (end != (resource_size_t)end) | ||
1097 | continue; | ||
1098 | |||
1099 | if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN) | ||
1100 | continue; | ||
1101 | |||
1102 | memblock_add(ei->addr, ei->size); | ||
1103 | } | ||
1104 | |||
1105 | memblock_analyze(); | ||
1106 | memblock_dump_all(); | ||
1107 | } | ||
1108 | |||
1109 | void __init memblock_find_dma_reserve(void) | ||
1110 | { | ||
1111 | #ifdef CONFIG_X86_64 | ||
1112 | u64 free_size_pfn; | ||
1113 | u64 mem_size_pfn; | ||
1114 | /* | ||
1115 | * need to find out the used area below MAX_DMA_PFN; | ||
1116 | * use memblock to get the free size in [0, MAX_DMA_PFN] | ||
1117 | * first, and assume bootmem will not allocate below MAX_DMA_PFN | ||
1118 | */ | ||
1119 | mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; | ||
1120 | free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; | ||
1121 | set_dma_reserve(mem_size_pfn - free_size_pfn); | ||
1122 | #endif | ||
1123 | } | ||
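memblock_x86_fill() above is the bridge from firmware tables to memblock: every usable e820 entry becomes a memblock_add(), then memblock_analyze() coalesces the regions. A reduced sketch of the same flow, assuming a generic table of ranges (struct fw_range is a stand-in for struct e820entry, not from the patch):

#include <linux/memblock.h>

struct fw_range { u64 addr, size; };	/* stand-in for struct e820entry */

/* Sketch: populate memblock from a firmware table, as memblock_x86_fill()
 * does from e820 above. */
static void __init fill_from_table(struct fw_range *tbl, int nr)
{
	int i;

	memblock_can_resize = 1;	/* the table may exceed the static array */

	for (i = 0; i < nr; i++)
		memblock_add(tbl[i].addr, tbl[i].size);

	memblock_analyze();
	memblock_dump_all();
}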
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c index c2fa9b8b497e..0fe27d7c6258 100644 --- a/arch/x86/kernel/efi.c +++ b/arch/x86/kernel/efi.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/efi.h> | 31 | #include <linux/efi.h> |
32 | #include <linux/bootmem.h> | 32 | #include <linux/bootmem.h> |
33 | #include <linux/memblock.h> | ||
33 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
34 | #include <linux/uaccess.h> | 35 | #include <linux/uaccess.h> |
35 | #include <linux/time.h> | 36 | #include <linux/time.h> |
@@ -275,7 +276,7 @@ static void __init do_add_efi_memmap(void) | |||
275 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); | 276 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); |
276 | } | 277 | } |
277 | 278 | ||
278 | void __init efi_reserve_early(void) | 279 | void __init efi_memblock_x86_reserve_range(void) |
279 | { | 280 | { |
280 | unsigned long pmap; | 281 | unsigned long pmap; |
281 | 282 | ||
@@ -290,7 +291,7 @@ void __init efi_reserve_early(void) | |||
290 | boot_params.efi_info.efi_memdesc_size; | 291 | boot_params.efi_info.efi_memdesc_size; |
291 | memmap.desc_version = boot_params.efi_info.efi_memdesc_version; | 292 | memmap.desc_version = boot_params.efi_info.efi_memdesc_version; |
292 | memmap.desc_size = boot_params.efi_info.efi_memdesc_size; | 293 | memmap.desc_size = boot_params.efi_info.efi_memdesc_size; |
293 | reserve_early(pmap, pmap + memmap.nr_map * memmap.desc_size, | 294 | memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size, |
294 | "EFI memmap"); | 295 | "EFI memmap"); |
295 | } | 296 | } |
296 | 297 | ||
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c index 3e66bd364a9d..af0699ba48cf 100644 --- a/arch/x86/kernel/head.c +++ b/arch/x86/kernel/head.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <linux/init.h> | 2 | #include <linux/init.h> |
3 | #include <linux/memblock.h> | ||
3 | 4 | ||
4 | #include <asm/setup.h> | 5 | #include <asm/setup.h> |
5 | #include <asm/bios_ebda.h> | 6 | #include <asm/bios_ebda.h> |
@@ -51,5 +52,5 @@ void __init reserve_ebda_region(void) | |||
51 | lowmem = 0x9f000; | 52 | lowmem = 0x9f000; |
52 | 53 | ||
53 | /* reserve all memory between lowmem and the 1MB mark */ | 54 | /* reserve all memory between lowmem and the 1MB mark */ |
54 | reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved"); | 55 | memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved"); |
55 | } | 56 | } |
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 784360c0625c..9a6ca2392170 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/start_kernel.h> | 9 | #include <linux/start_kernel.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/memblock.h> | ||
11 | 12 | ||
12 | #include <asm/setup.h> | 13 | #include <asm/setup.h> |
13 | #include <asm/sections.h> | 14 | #include <asm/sections.h> |
@@ -30,17 +31,18 @@ static void __init i386_default_early_setup(void) | |||
30 | 31 | ||
31 | void __init i386_start_kernel(void) | 32 | void __init i386_start_kernel(void) |
32 | { | 33 | { |
34 | memblock_init(); | ||
35 | |||
33 | #ifdef CONFIG_X86_TRAMPOLINE | 36 | #ifdef CONFIG_X86_TRAMPOLINE |
34 | /* | 37 | /* |
35 | * But first pinch a few for the stack/trampoline stuff | 38 | * But first pinch a few for the stack/trampoline stuff |
36 | * FIXME: Don't need the extra page at 4K, but need to fix | 39 | * FIXME: Don't need the extra page at 4K, but need to fix |
37 | * trampoline before removing it. (see the GDT stuff) | 40 | * trampoline before removing it. (see the GDT stuff) |
38 | */ | 41 | */ |
39 | reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, | 42 | memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); |
40 | "EX TRAMPOLINE"); | ||
41 | #endif | 43 | #endif |
42 | 44 | ||
43 | reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); | 45 | memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); |
44 | 46 | ||
45 | #ifdef CONFIG_BLK_DEV_INITRD | 47 | #ifdef CONFIG_BLK_DEV_INITRD |
46 | /* Reserve INITRD */ | 48 | /* Reserve INITRD */ |
@@ -49,7 +51,7 @@ void __init i386_start_kernel(void) | |||
49 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; | 51 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; |
50 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; | 52 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; |
51 | u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); | 53 | u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
52 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); | 54 | memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); |
53 | } | 55 | } |
54 | #endif | 56 | #endif |
55 | 57 | ||
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 7147143fd614..97adf9828b95 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/start_kernel.h> | 13 | #include <linux/start_kernel.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <linux/memblock.h> | ||
15 | 16 | ||
16 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
17 | #include <asm/proto.h> | 18 | #include <asm/proto.h> |
@@ -98,7 +99,9 @@ void __init x86_64_start_reservations(char *real_mode_data) | |||
98 | { | 99 | { |
99 | copy_bootdata(__va(real_mode_data)); | 100 | copy_bootdata(__va(real_mode_data)); |
100 | 101 | ||
101 | reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); | 102 | memblock_init(); |
103 | |||
104 | memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); | ||
102 | 105 | ||
103 | #ifdef CONFIG_BLK_DEV_INITRD | 106 | #ifdef CONFIG_BLK_DEV_INITRD |
104 | /* Reserve INITRD */ | 107 | /* Reserve INITRD */ |
@@ -107,7 +110,7 @@ void __init x86_64_start_reservations(char *real_mode_data) | |||
107 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; | 110 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; |
108 | unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; | 111 | unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; |
109 | unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); | 112 | unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
110 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); | 113 | memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); |
111 | } | 114 | } |
112 | #endif | 115 | #endif |
113 | 116 | ||
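The 32-bit and 64-bit entry paths now share the same shape: call memblock_init() first, then immediately fence off the kernel image (and initrd) before anything else can allocate over them. Condensed into one sketch (symbols as used in the hunks above):

#include <linux/memblock.h>
#include <asm/sections.h>

/* Sketch of the early-boot ordering established above: initialize
 * memblock, then reserve the kernel image straight away. */
static void __init early_reservations_example(void)
{
	memblock_init();

	memblock_x86_reserve_range(__pa_symbol(&_text),
				   __pa_symbol(&__bss_stop),
				   "TEXT DATA BSS");
}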
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index d7b6f7fb4fec..9af64d9c4b67 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
13 | #include <linux/bootmem.h> | 13 | #include <linux/bootmem.h> |
14 | #include <linux/memblock.h> | ||
14 | #include <linux/kernel_stat.h> | 15 | #include <linux/kernel_stat.h> |
15 | #include <linux/mc146818rtc.h> | 16 | #include <linux/mc146818rtc.h> |
16 | #include <linux/bitops.h> | 17 | #include <linux/bitops.h> |
@@ -657,7 +658,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf) | |||
657 | { | 658 | { |
658 | unsigned long size = get_mpc_size(mpf->physptr); | 659 | unsigned long size = get_mpc_size(mpf->physptr); |
659 | 660 | ||
660 | reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc"); | 661 | memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc"); |
661 | } | 662 | } |
662 | 663 | ||
663 | static int __init smp_scan_config(unsigned long base, unsigned long length) | 664 | static int __init smp_scan_config(unsigned long base, unsigned long length) |
@@ -686,7 +687,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) | |||
686 | mpf, (u64)virt_to_phys(mpf)); | 687 | mpf, (u64)virt_to_phys(mpf)); |
687 | 688 | ||
688 | mem = virt_to_phys(mpf); | 689 | mem = virt_to_phys(mpf); |
689 | reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf"); | 690 | memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf"); |
690 | if (mpf->physptr) | 691 | if (mpf->physptr) |
691 | smp_reserve_memory(mpf); | 692 | smp_reserve_memory(mpf); |
692 | 693 | ||
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c3a4fbb2b996..bf89e0a59b88 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/apm_bios.h> | 31 | #include <linux/apm_bios.h> |
32 | #include <linux/initrd.h> | 32 | #include <linux/initrd.h> |
33 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
34 | #include <linux/memblock.h> | ||
34 | #include <linux/seq_file.h> | 35 | #include <linux/seq_file.h> |
35 | #include <linux/console.h> | 36 | #include <linux/console.h> |
36 | #include <linux/mca.h> | 37 | #include <linux/mca.h> |
@@ -302,7 +303,7 @@ static inline void init_gbpages(void) | |||
302 | static void __init reserve_brk(void) | 303 | static void __init reserve_brk(void) |
303 | { | 304 | { |
304 | if (_brk_end > _brk_start) | 305 | if (_brk_end > _brk_start) |
305 | reserve_early(__pa(_brk_start), __pa(_brk_end), "BRK"); | 306 | memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK"); |
306 | 307 | ||
307 | /* Mark brk area as locked down and no longer taking any | 308 | /* Mark brk area as locked down and no longer taking any |
308 | new allocations */ | 309 | new allocations */ |
@@ -324,17 +325,16 @@ static void __init relocate_initrd(void) | |||
324 | char *p, *q; | 325 | char *p, *q; |
325 | 326 | ||
326 | /* We need to move the initrd down into lowmem */ | 327 | /* We need to move the initrd down into lowmem */ |
327 | ramdisk_here = find_e820_area(0, end_of_lowmem, area_size, | 328 | ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size, |
328 | PAGE_SIZE); | 329 | PAGE_SIZE); |
329 | 330 | ||
330 | if (ramdisk_here == -1ULL) | 331 | if (ramdisk_here == MEMBLOCK_ERROR) |
331 | panic("Cannot find place for new RAMDISK of size %lld\n", | 332 | panic("Cannot find place for new RAMDISK of size %lld\n", |
332 | ramdisk_size); | 333 | ramdisk_size); |
333 | 334 | ||
334 | /* Note: this includes all the lowmem currently occupied by | 335 | /* Note: this includes all the lowmem currently occupied by |
335 | the initrd, we rely on that fact to keep the data intact. */ | 336 | the initrd, we rely on that fact to keep the data intact. */ |
336 | reserve_early(ramdisk_here, ramdisk_here + area_size, | 337 | memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK"); |
337 | "NEW RAMDISK"); | ||
338 | initrd_start = ramdisk_here + PAGE_OFFSET; | 338 | initrd_start = ramdisk_here + PAGE_OFFSET; |
339 | initrd_end = initrd_start + ramdisk_size; | 339 | initrd_end = initrd_start + ramdisk_size; |
340 | printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", | 340 | printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", |
@@ -390,7 +390,7 @@ static void __init reserve_initrd(void) | |||
390 | initrd_start = 0; | 390 | initrd_start = 0; |
391 | 391 | ||
392 | if (ramdisk_size >= (end_of_lowmem>>1)) { | 392 | if (ramdisk_size >= (end_of_lowmem>>1)) { |
393 | free_early(ramdisk_image, ramdisk_end); | 393 | memblock_x86_free_range(ramdisk_image, ramdisk_end); |
394 | printk(KERN_ERR "initrd too large to handle, " | 394 | printk(KERN_ERR "initrd too large to handle, " |
395 | "disabling initrd\n"); | 395 | "disabling initrd\n"); |
396 | return; | 396 | return; |
@@ -413,7 +413,7 @@ static void __init reserve_initrd(void) | |||
413 | 413 | ||
414 | relocate_initrd(); | 414 | relocate_initrd(); |
415 | 415 | ||
416 | free_early(ramdisk_image, ramdisk_end); | 416 | memblock_x86_free_range(ramdisk_image, ramdisk_end); |
417 | } | 417 | } |
418 | #else | 418 | #else |
419 | static void __init reserve_initrd(void) | 419 | static void __init reserve_initrd(void) |
@@ -469,7 +469,7 @@ static void __init e820_reserve_setup_data(void) | |||
469 | e820_print_map("reserve setup_data"); | 469 | e820_print_map("reserve setup_data"); |
470 | } | 470 | } |
471 | 471 | ||
472 | static void __init reserve_early_setup_data(void) | 472 | static void __init memblock_x86_reserve_range_setup_data(void) |
473 | { | 473 | { |
474 | struct setup_data *data; | 474 | struct setup_data *data; |
475 | u64 pa_data; | 475 | u64 pa_data; |
@@ -481,7 +481,7 @@ static void __init reserve_early_setup_data(void) | |||
481 | while (pa_data) { | 481 | while (pa_data) { |
482 | data = early_memremap(pa_data, sizeof(*data)); | 482 | data = early_memremap(pa_data, sizeof(*data)); |
483 | sprintf(buf, "setup data %x", data->type); | 483 | sprintf(buf, "setup data %x", data->type); |
484 | reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf); | 484 | memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf); |
485 | pa_data = data->next; | 485 | pa_data = data->next; |
486 | early_iounmap(data, sizeof(*data)); | 486 | early_iounmap(data, sizeof(*data)); |
487 | } | 487 | } |
@@ -519,23 +519,23 @@ static void __init reserve_crashkernel(void) | |||
519 | if (crash_base <= 0) { | 519 | if (crash_base <= 0) { |
520 | const unsigned long long alignment = 16<<20; /* 16M */ | 520 | const unsigned long long alignment = 16<<20; /* 16M */ |
521 | 521 | ||
522 | crash_base = find_e820_area(alignment, ULONG_MAX, crash_size, | 522 | crash_base = memblock_find_in_range(alignment, ULONG_MAX, crash_size, |
523 | alignment); | 523 | alignment); |
524 | if (crash_base == -1ULL) { | 524 | if (crash_base == MEMBLOCK_ERROR) { |
525 | pr_info("crashkernel reservation failed - No suitable area found.\n"); | 525 | pr_info("crashkernel reservation failed - No suitable area found.\n"); |
526 | return; | 526 | return; |
527 | } | 527 | } |
528 | } else { | 528 | } else { |
529 | unsigned long long start; | 529 | unsigned long long start; |
530 | 530 | ||
531 | start = find_e820_area(crash_base, ULONG_MAX, crash_size, | 531 | start = memblock_find_in_range(crash_base, ULONG_MAX, crash_size, |
532 | 1<<20); | 532 | 1<<20); |
533 | if (start != crash_base) { | 533 | if (start != crash_base) { |
534 | pr_info("crashkernel reservation failed - memory is in use.\n"); | 534 | pr_info("crashkernel reservation failed - memory is in use.\n"); |
535 | return; | 535 | return; |
536 | } | 536 | } |
537 | } | 537 | } |
538 | reserve_early(crash_base, crash_base + crash_size, "CRASH KERNEL"); | 538 | memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL"); |
539 | 539 | ||
540 | printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " | 540 | printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " |
541 | "for crashkernel (System RAM: %ldMB)\n", | 541 | "for crashkernel (System RAM: %ldMB)\n", |
@@ -615,7 +615,7 @@ static __init void reserve_ibft_region(void) | |||
615 | addr = find_ibft_region(&size); | 615 | addr = find_ibft_region(&size); |
616 | 616 | ||
617 | if (size) | 617 | if (size) |
618 | reserve_early_overlap_ok(addr, addr + size, "ibft"); | 618 | memblock_x86_reserve_range(addr, addr + size, "* ibft"); |
619 | } | 619 | } |
620 | 620 | ||
621 | #ifdef CONFIG_X86_RESERVE_LOW_64K | 621 | #ifdef CONFIG_X86_RESERVE_LOW_64K |
@@ -709,6 +709,15 @@ static void __init trim_bios_range(void) | |||
709 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); | 709 | sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); |
710 | } | 710 | } |
711 | 711 | ||
712 | static u64 __init get_max_mapped(void) | ||
713 | { | ||
714 | u64 end = max_pfn_mapped; | ||
715 | |||
716 | end <<= PAGE_SHIFT; | ||
717 | |||
718 | return end; | ||
719 | } | ||
720 | |||
712 | /* | 721 | /* |
713 | * Determine if we were loaded by an EFI loader. If so, then we have also been | 722 | * Determine if we were loaded by an EFI loader. If so, then we have also been |
714 | * passed the efi memmap, systab, etc., so we should use these data structures | 723 | * passed the efi memmap, systab, etc., so we should use these data structures |
@@ -782,7 +791,7 @@ void __init setup_arch(char **cmdline_p) | |||
782 | #endif | 791 | #endif |
783 | 4)) { | 792 | 4)) { |
784 | efi_enabled = 1; | 793 | efi_enabled = 1; |
785 | efi_reserve_early(); | 794 | efi_memblock_x86_reserve_range(); |
786 | } | 795 | } |
787 | #endif | 796 | #endif |
788 | 797 | ||
@@ -842,7 +851,7 @@ void __init setup_arch(char **cmdline_p) | |||
842 | vmi_activate(); | 851 | vmi_activate(); |
843 | 852 | ||
844 | /* after early param, so could get panic from serial */ | 853 | /* after early param, so could get panic from serial */ |
845 | reserve_early_setup_data(); | 854 | memblock_x86_reserve_range_setup_data(); |
846 | 855 | ||
847 | if (acpi_mps_check()) { | 856 | if (acpi_mps_check()) { |
848 | #ifdef CONFIG_X86_LOCAL_APIC | 857 | #ifdef CONFIG_X86_LOCAL_APIC |
@@ -897,8 +906,6 @@ void __init setup_arch(char **cmdline_p) | |||
897 | */ | 906 | */ |
898 | max_pfn = e820_end_of_ram_pfn(); | 907 | max_pfn = e820_end_of_ram_pfn(); |
899 | 908 | ||
900 | /* preallocate 4k for mptable mpc */ | ||
901 | early_reserve_e820_mpc_new(); | ||
902 | /* update e820 for memory not covered by WB MTRRs */ | 909 | /* update e820 for memory not covered by WB MTRRs */ |
903 | mtrr_bp_init(); | 910 | mtrr_bp_init(); |
904 | if (mtrr_trim_uncached_memory(max_pfn)) | 911 | if (mtrr_trim_uncached_memory(max_pfn)) |
@@ -923,15 +930,6 @@ void __init setup_arch(char **cmdline_p) | |||
923 | max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; | 930 | max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; |
924 | #endif | 931 | #endif |
925 | 932 | ||
926 | #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION | ||
927 | setup_bios_corruption_check(); | ||
928 | #endif | ||
929 | |||
930 | printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", | ||
931 | max_pfn_mapped<<PAGE_SHIFT); | ||
932 | |||
933 | reserve_brk(); | ||
934 | |||
935 | /* | 933 | /* |
936 | * Find and reserve possible boot-time SMP configuration: | 934 | * Find and reserve possible boot-time SMP configuration: |
937 | */ | 935 | */ |
@@ -939,6 +937,26 @@ void __init setup_arch(char **cmdline_p) | |||
939 | 937 | ||
940 | reserve_ibft_region(); | 938 | reserve_ibft_region(); |
941 | 939 | ||
940 | /* | ||
941 | * Need to conclude brk before memblock_x86_fill(); | ||
942 | * it could use memblock_find_in_range, which could | ||
943 | * overlap with the brk area. | ||
944 | */ | ||
945 | reserve_brk(); | ||
946 | |||
947 | memblock.current_limit = get_max_mapped(); | ||
948 | memblock_x86_fill(); | ||
949 | |||
950 | /* preallocate 4k for mptable mpc */ | ||
951 | early_reserve_e820_mpc_new(); | ||
952 | |||
953 | #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION | ||
954 | setup_bios_corruption_check(); | ||
955 | #endif | ||
956 | |||
957 | printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", | ||
958 | max_pfn_mapped<<PAGE_SHIFT); | ||
959 | |||
942 | reserve_trampoline_memory(); | 960 | reserve_trampoline_memory(); |
943 | 961 | ||
944 | #ifdef CONFIG_ACPI_SLEEP | 962 | #ifdef CONFIG_ACPI_SLEEP |
@@ -962,6 +980,7 @@ void __init setup_arch(char **cmdline_p) | |||
962 | max_low_pfn = max_pfn; | 980 | max_low_pfn = max_pfn; |
963 | } | 981 | } |
964 | #endif | 982 | #endif |
983 | memblock.current_limit = get_max_mapped(); | ||
965 | 984 | ||
966 | /* | 985 | /* |
967 | * NOTE: On x86-32, only from this point on, fixmaps are ready for use. | 986 | * NOTE: On x86-32, only from this point on, fixmaps are ready for use. |
@@ -1000,10 +1019,7 @@ void __init setup_arch(char **cmdline_p) | |||
1000 | #endif | 1019 | #endif |
1001 | 1020 | ||
1002 | initmem_init(0, max_pfn, acpi, k8); | 1021 | initmem_init(0, max_pfn, acpi, k8); |
1003 | #ifndef CONFIG_NO_BOOTMEM | 1022 | memblock_find_dma_reserve(); |
1004 | early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT); | ||
1005 | #endif | ||
1006 | |||
1007 | dma32_reserve_bootmem(); | 1023 | dma32_reserve_bootmem(); |
1008 | 1024 | ||
1009 | #ifdef CONFIG_KVM_CLOCK | 1025 | #ifdef CONFIG_KVM_CLOCK |
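The setup_arch() reshuffle above encodes an ordering constraint: brk must be concluded before memblock_x86_fill() makes memblock usable for allocation, and memblock.current_limit must track how much memory is actually mapped. A sketch of the sequence (all helpers are setup.c-local functions shown in the hunks above):

/* Sketch: the ordering setup_arch() now follows. */
static void __init memory_setup_order_example(void)
{
	reserve_brk();				/* conclude brk first */

	memblock.current_limit = get_max_mapped();
	memblock_x86_fill();			/* e820 -> memblock */

	early_reserve_e820_mpc_new();		/* allocations now see brk */
}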
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index a60df9ae6454..42e2633f369e 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -131,13 +131,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) | |||
131 | 131 | ||
132 | static void __init pcpu_fc_free(void *ptr, size_t size) | 132 | static void __init pcpu_fc_free(void *ptr, size_t size) |
133 | { | 133 | { |
134 | #ifdef CONFIG_NO_BOOTMEM | ||
135 | u64 start = __pa(ptr); | ||
136 | u64 end = start + size; | ||
137 | free_early_partial(start, end); | ||
138 | #else | ||
139 | free_bootmem(__pa(ptr), size); | 134 | free_bootmem(__pa(ptr), size); |
140 | #endif | ||
141 | } | 135 | } |
142 | 136 | ||
143 | static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) | 137 | static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) |
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index a874495b3673..9a3d44c0df9a 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c | |||
@@ -1,8 +1,8 @@ | |||
1 | #include <linux/io.h> | 1 | #include <linux/io.h> |
2 | #include <linux/memblock.h> | ||
2 | 3 | ||
3 | #include <asm/trampoline.h> | 4 | #include <asm/trampoline.h> |
4 | #include <asm/pgtable.h> | 5 | #include <asm/pgtable.h> |
5 | #include <asm/e820.h> | ||
6 | 6 | ||
7 | #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) | 7 | #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) |
8 | #define __trampinit | 8 | #define __trampinit |
@@ -17,15 +17,15 @@ unsigned char *__trampinitdata trampoline_base; | |||
17 | 17 | ||
18 | void __init reserve_trampoline_memory(void) | 18 | void __init reserve_trampoline_memory(void) |
19 | { | 19 | { |
20 | unsigned long mem; | 20 | phys_addr_t mem; |
21 | 21 | ||
22 | /* Has to be in very low memory so we can execute real-mode AP code. */ | 22 | /* Has to be in very low memory so we can execute real-mode AP code. */ |
23 | mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); | 23 | mem = memblock_find_in_range(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); |
24 | if (mem == -1L) | 24 | if (mem == MEMBLOCK_ERROR) |
25 | panic("Cannot allocate trampoline\n"); | 25 | panic("Cannot allocate trampoline\n"); |
26 | 26 | ||
27 | trampoline_base = __va(mem); | 27 | trampoline_base = __va(mem); |
28 | reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); | 28 | memblock_x86_reserve_range(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); |
29 | } | 29 | } |
30 | 30 | ||
31 | /* | 31 | /* |
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index a4c768397baa..55543397a8a7 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -26,4 +26,6 @@ obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o | |||
26 | obj-$(CONFIG_K8_NUMA) += k8topology_64.o | 26 | obj-$(CONFIG_K8_NUMA) += k8topology_64.o |
27 | obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o | 27 | obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o |
28 | 28 | ||
29 | obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o | ||
30 | |||
29 | obj-$(CONFIG_MEMTEST) += memtest.o | 31 | obj-$(CONFIG_MEMTEST) += memtest.o |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index b278535b14aa..c0e28a13de7d 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/initrd.h> | 2 | #include <linux/initrd.h> |
3 | #include <linux/ioport.h> | 3 | #include <linux/ioport.h> |
4 | #include <linux/swap.h> | 4 | #include <linux/swap.h> |
5 | #include <linux/memblock.h> | ||
5 | 6 | ||
6 | #include <asm/cacheflush.h> | 7 | #include <asm/cacheflush.h> |
7 | #include <asm/e820.h> | 8 | #include <asm/e820.h> |
@@ -33,6 +34,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse, | |||
33 | int use_gbpages) | 34 | int use_gbpages) |
34 | { | 35 | { |
35 | unsigned long puds, pmds, ptes, tables, start; | 36 | unsigned long puds, pmds, ptes, tables, start; |
37 | phys_addr_t base; | ||
36 | 38 | ||
37 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; | 39 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; |
38 | tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); | 40 | tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); |
@@ -75,12 +77,12 @@ static void __init find_early_table_space(unsigned long end, int use_pse, | |||
75 | #else | 77 | #else |
76 | start = 0x8000; | 78 | start = 0x8000; |
77 | #endif | 79 | #endif |
78 | e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT, | 80 | base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT, |
79 | tables, PAGE_SIZE); | 81 | tables, PAGE_SIZE); |
80 | if (e820_table_start == -1UL) | 82 | if (base == MEMBLOCK_ERROR) |
81 | panic("Cannot find space for the kernel page tables"); | 83 | panic("Cannot find space for the kernel page tables"); |
82 | 84 | ||
83 | e820_table_start >>= PAGE_SHIFT; | 85 | e820_table_start = base >> PAGE_SHIFT; |
84 | e820_table_end = e820_table_start; | 86 | e820_table_end = e820_table_start; |
85 | e820_table_top = e820_table_start + (tables >> PAGE_SHIFT); | 87 | e820_table_top = e820_table_start + (tables >> PAGE_SHIFT); |
86 | 88 | ||
@@ -299,7 +301,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
299 | __flush_tlb_all(); | 301 | __flush_tlb_all(); |
300 | 302 | ||
301 | if (!after_bootmem && e820_table_end > e820_table_start) | 303 | if (!after_bootmem && e820_table_end > e820_table_start) |
302 | reserve_early(e820_table_start << PAGE_SHIFT, | 304 | memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, |
303 | e820_table_end << PAGE_SHIFT, "PGTABLE"); | 305 | e820_table_end << PAGE_SHIFT, "PGTABLE"); |
304 | 306 | ||
305 | if (!after_bootmem) | 307 | if (!after_bootmem) |
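find_early_table_space() above follows a two-phase pattern: the window for page tables is only located up front; the actual reservation happens later in init_memory_mapping(), once the number of pages really consumed is known. A sketch of the shape (the bounds and the build step are placeholders):

#include <linux/kernel.h>
#include <linux/memblock.h>
#include <asm/memblock.h>

/* Sketch: locate space first, reserve only what was consumed, as the
 * PGTABLE handling above does. */
static u64 __init locate_then_reserve(u64 tables, u64 used)
{
	u64 base = memblock_find_in_range(PAGE_SIZE, 1ULL << 30,
					  tables, PAGE_SIZE);
	if (base == MEMBLOCK_ERROR)
		panic("no space for page tables");

	/* ... build page tables here, consuming 'used' bytes ... */

	memblock_x86_reserve_range(base, base + used, "PGTABLE");
	return base;
}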
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index bca79091b9d6..c2385d7ae313 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/pfn.h> | 25 | #include <linux/pfn.h> |
26 | #include <linux/poison.h> | 26 | #include <linux/poison.h> |
27 | #include <linux/bootmem.h> | 27 | #include <linux/bootmem.h> |
28 | #include <linux/memblock.h> | ||
28 | #include <linux/proc_fs.h> | 29 | #include <linux/proc_fs.h> |
29 | #include <linux/memory_hotplug.h> | 30 | #include <linux/memory_hotplug.h> |
30 | #include <linux/initrd.h> | 31 | #include <linux/initrd.h> |
@@ -712,14 +713,14 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, | |||
712 | highstart_pfn = highend_pfn = max_pfn; | 713 | highstart_pfn = highend_pfn = max_pfn; |
713 | if (max_pfn > max_low_pfn) | 714 | if (max_pfn > max_low_pfn) |
714 | highstart_pfn = max_low_pfn; | 715 | highstart_pfn = max_low_pfn; |
715 | e820_register_active_regions(0, 0, highend_pfn); | 716 | memblock_x86_register_active_regions(0, 0, highend_pfn); |
716 | sparse_memory_present_with_active_regions(0); | 717 | sparse_memory_present_with_active_regions(0); |
717 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", | 718 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", |
718 | pages_to_mb(highend_pfn - highstart_pfn)); | 719 | pages_to_mb(highend_pfn - highstart_pfn)); |
719 | num_physpages = highend_pfn; | 720 | num_physpages = highend_pfn; |
720 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; | 721 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; |
721 | #else | 722 | #else |
722 | e820_register_active_regions(0, 0, max_low_pfn); | 723 | memblock_x86_register_active_regions(0, 0, max_low_pfn); |
723 | sparse_memory_present_with_active_regions(0); | 724 | sparse_memory_present_with_active_regions(0); |
724 | num_physpages = max_low_pfn; | 725 | num_physpages = max_low_pfn; |
725 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; | 726 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; |
@@ -750,68 +751,12 @@ static void __init zone_sizes_init(void) | |||
750 | free_area_init_nodes(max_zone_pfns); | 751 | free_area_init_nodes(max_zone_pfns); |
751 | } | 752 | } |
752 | 753 | ||
753 | #ifndef CONFIG_NO_BOOTMEM | ||
754 | static unsigned long __init setup_node_bootmem(int nodeid, | ||
755 | unsigned long start_pfn, | ||
756 | unsigned long end_pfn, | ||
757 | unsigned long bootmap) | ||
758 | { | ||
759 | unsigned long bootmap_size; | ||
760 | |||
761 | /* don't touch min_low_pfn */ | ||
762 | bootmap_size = init_bootmem_node(NODE_DATA(nodeid), | ||
763 | bootmap >> PAGE_SHIFT, | ||
764 | start_pfn, end_pfn); | ||
765 | printk(KERN_INFO " node %d low ram: %08lx - %08lx\n", | ||
766 | nodeid, start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); | ||
767 | printk(KERN_INFO " node %d bootmap %08lx - %08lx\n", | ||
768 | nodeid, bootmap, bootmap + bootmap_size); | ||
769 | free_bootmem_with_active_regions(nodeid, end_pfn); | ||
770 | |||
771 | return bootmap + bootmap_size; | ||
772 | } | ||
773 | #endif | ||
774 | |||
775 | void __init setup_bootmem_allocator(void) | 754 | void __init setup_bootmem_allocator(void) |
776 | { | 755 | { |
777 | #ifndef CONFIG_NO_BOOTMEM | ||
778 | int nodeid; | ||
779 | unsigned long bootmap_size, bootmap; | ||
780 | /* | ||
781 | * Initialize the boot-time allocator (with low memory only): | ||
782 | */ | ||
783 | bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT; | ||
784 | bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size, | ||
785 | PAGE_SIZE); | ||
786 | if (bootmap == -1L) | ||
787 | panic("Cannot find bootmem map of size %ld\n", bootmap_size); | ||
788 | reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP"); | ||
789 | #endif | ||
790 | |||
791 | printk(KERN_INFO " mapped low ram: 0 - %08lx\n", | 756 | printk(KERN_INFO " mapped low ram: 0 - %08lx\n", |
792 | max_pfn_mapped<<PAGE_SHIFT); | 757 | max_pfn_mapped<<PAGE_SHIFT); |
793 | printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT); | 758 | printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT); |
794 | 759 | ||
795 | #ifndef CONFIG_NO_BOOTMEM | ||
796 | for_each_online_node(nodeid) { | ||
797 | unsigned long start_pfn, end_pfn; | ||
798 | |||
799 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
800 | start_pfn = node_start_pfn[nodeid]; | ||
801 | end_pfn = node_end_pfn[nodeid]; | ||
802 | if (start_pfn > max_low_pfn) | ||
803 | continue; | ||
804 | if (end_pfn > max_low_pfn) | ||
805 | end_pfn = max_low_pfn; | ||
806 | #else | ||
807 | start_pfn = 0; | ||
808 | end_pfn = max_low_pfn; | ||
809 | #endif | ||
810 | bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn, | ||
811 | bootmap); | ||
812 | } | ||
813 | #endif | ||
814 | |||
815 | after_bootmem = 1; | 760 | after_bootmem = 1; |
816 | } | 761 | } |
817 | 762 | ||
@@ -1070,8 +1015,3 @@ void mark_rodata_ro(void) | |||
1070 | } | 1015 | } |
1071 | #endif | 1016 | #endif |
1072 | 1017 | ||
1073 | int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, | ||
1074 | int flags) | ||
1075 | { | ||
1076 | return reserve_bootmem(phys, len, flags); | ||
1077 | } | ||
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 9a6674689a20..850bad0a6967 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/initrd.h> | 21 | #include <linux/initrd.h> |
22 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/memblock.h> | ||
24 | #include <linux/proc_fs.h> | 25 | #include <linux/proc_fs.h> |
25 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
26 | #include <linux/pfn.h> | 27 | #include <linux/pfn.h> |
@@ -52,8 +53,6 @@ | |||
52 | #include <asm/init.h> | 53 | #include <asm/init.h> |
53 | #include <linux/bootmem.h> | 54 | #include <linux/bootmem.h> |
54 | 55 | ||
55 | static unsigned long dma_reserve __initdata; | ||
56 | |||
57 | static int __init parse_direct_gbpages_off(char *arg) | 56 | static int __init parse_direct_gbpages_off(char *arg) |
58 | { | 57 | { |
59 | direct_gbpages = 0; | 58 | direct_gbpages = 0; |
@@ -573,23 +572,7 @@ kernel_physical_mapping_init(unsigned long start, | |||
573 | void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, | 572 | void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, |
574 | int acpi, int k8) | 573 | int acpi, int k8) |
575 | { | 574 | { |
576 | #ifndef CONFIG_NO_BOOTMEM | 575 | memblock_x86_register_active_regions(0, start_pfn, end_pfn); |
577 | unsigned long bootmap_size, bootmap; | ||
578 | |||
579 | bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT; | ||
580 | bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size, | ||
581 | PAGE_SIZE); | ||
582 | if (bootmap == -1L) | ||
583 | panic("Cannot find bootmem map of size %ld\n", bootmap_size); | ||
584 | reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP"); | ||
585 | /* don't touch min_low_pfn */ | ||
586 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT, | ||
587 | 0, end_pfn); | ||
588 | e820_register_active_regions(0, start_pfn, end_pfn); | ||
589 | free_bootmem_with_active_regions(0, end_pfn); | ||
590 | #else | ||
591 | e820_register_active_regions(0, start_pfn, end_pfn); | ||
592 | #endif | ||
593 | } | 576 | } |
594 | #endif | 577 | #endif |
595 | 578 | ||
@@ -799,52 +782,6 @@ void mark_rodata_ro(void) | |||
799 | 782 | ||
800 | #endif | 783 | #endif |
801 | 784 | ||
802 | int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, | ||
803 | int flags) | ||
804 | { | ||
805 | #ifdef CONFIG_NUMA | ||
806 | int nid, next_nid; | ||
807 | int ret; | ||
808 | #endif | ||
809 | unsigned long pfn = phys >> PAGE_SHIFT; | ||
810 | |||
811 | if (pfn >= max_pfn) { | ||
812 | /* | ||
813 | * This can happen with kdump kernels when accessing | ||
814 | * firmware tables: | ||
815 | */ | ||
816 | if (pfn < max_pfn_mapped) | ||
817 | return -EFAULT; | ||
818 | |||
819 | printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n", | ||
820 | phys, len); | ||
821 | return -EFAULT; | ||
822 | } | ||
823 | |||
824 | /* Should check here against the e820 map to avoid double free */ | ||
825 | #ifdef CONFIG_NUMA | ||
826 | nid = phys_to_nid(phys); | ||
827 | next_nid = phys_to_nid(phys + len - 1); | ||
828 | if (nid == next_nid) | ||
829 | ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags); | ||
830 | else | ||
831 | ret = reserve_bootmem(phys, len, flags); | ||
832 | |||
833 | if (ret != 0) | ||
834 | return ret; | ||
835 | |||
836 | #else | ||
837 | reserve_bootmem(phys, len, flags); | ||
838 | #endif | ||
839 | |||
840 | if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) { | ||
841 | dma_reserve += len / PAGE_SIZE; | ||
842 | set_dma_reserve(dma_reserve); | ||
843 | } | ||
844 | |||
845 | return 0; | ||
846 | } | ||
847 | |||
848 | int kern_addr_valid(unsigned long addr) | 785 | int kern_addr_valid(unsigned long addr) |
849 | { | 786 | { |
850 | unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; | 787 | unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; |
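With bootmem gone, initmem_init() only registers active regions; the buddy allocator is later fed directly from memblock through get_free_all_memory_range() (declared in the new header above), which yields RAM minus the reserved regions. A hedged sketch of a consumer (free_one_range() is a hypothetical helper, not a kernel API):

#include <linux/range.h>
#include <asm/memblock.h>

/* Sketch: hand free ranges straight to the page allocator under
 * NO_BOOTMEM. free_one_range() is hypothetical. */
static void __init release_free_memory(int nid)
{
	struct range *range;
	int i, nr = get_free_all_memory_range(&range, nid);

	for (i = 0; i < nr; i++)
		free_one_range(range[i].start, range[i].end);
}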
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c index 970ed579d4e4..966de9372e8c 100644 --- a/arch/x86/mm/k8topology_64.c +++ b/arch/x86/mm/k8topology_64.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/nodemask.h> | 13 | #include <linux/nodemask.h> |
14 | #include <linux/memblock.h> | ||
15 | |||
14 | #include <asm/io.h> | 16 | #include <asm/io.h> |
15 | #include <linux/pci_ids.h> | 17 | #include <linux/pci_ids.h> |
16 | #include <linux/acpi.h> | 18 | #include <linux/acpi.h> |
@@ -222,7 +224,7 @@ int __init k8_scan_nodes(void) | |||
222 | for_each_node_mask(i, node_possible_map) { | 224 | for_each_node_mask(i, node_possible_map) { |
223 | int j; | 225 | int j; |
224 | 226 | ||
225 | e820_register_active_regions(i, | 227 | memblock_x86_register_active_regions(i, |
226 | nodes[i].start >> PAGE_SHIFT, | 228 | nodes[i].start >> PAGE_SHIFT, |
227 | nodes[i].end >> PAGE_SHIFT); | 229 | nodes[i].end >> PAGE_SHIFT); |
228 | for (j = apicid_base; j < cores + apicid_base; j++) | 230 | for (j = apicid_base; j < cores + apicid_base; j++) |
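memblock_x86_register_active_regions(), used by the NUMA paths above and implemented in the new arch/x86/mm/memblock.c below, clips each memblock memory region to a node's PFN window before registering it. A sketch of a caller for a two-node layout (the PFN bounds are made up for illustration):

#include <asm/memblock.h>

/* Sketch: register each node's PFN window, as k8_scan_nodes() above
 * does per node. Bounds are hypothetical (4KB pages: 1<<20 PFNs = 4GB). */
static void __init register_nodes_example(void)
{
	memblock_x86_register_active_regions(0, 0, 1UL << 20);
	memblock_x86_register_active_regions(1, 1UL << 20, 2UL << 20);
}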
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c new file mode 100644 index 000000000000..50ecbc59757f --- /dev/null +++ b/arch/x86/mm/memblock.c | |||
@@ -0,0 +1,354 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/types.h> | ||
3 | #include <linux/init.h> | ||
4 | #include <linux/bitops.h> | ||
5 | #include <linux/memblock.h> | ||
6 | #include <linux/bootmem.h> | ||
7 | #include <linux/mm.h> | ||
8 | #include <linux/range.h> | ||
9 | |||
10 | /* Check for already reserved areas */ | ||
11 | static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align) | ||
12 | { | ||
13 | struct memblock_region *r; | ||
14 | u64 addr = *addrp, last; | ||
15 | u64 size = *sizep; | ||
16 | bool changed = false; | ||
17 | |||
18 | again: | ||
19 | last = addr + size; | ||
20 | for_each_memblock(reserved, r) { | ||
21 | if (last > r->base && addr < r->base) { | ||
22 | size = r->base - addr; | ||
23 | changed = true; | ||
24 | goto again; | ||
25 | } | ||
26 | if (last > (r->base + r->size) && addr < (r->base + r->size)) { | ||
27 | addr = round_up(r->base + r->size, align); | ||
28 | size = last - addr; | ||
29 | changed = true; | ||
30 | goto again; | ||
31 | } | ||
32 | if (last <= (r->base + r->size) && addr >= r->base) { | ||
33 | (*sizep)++; | ||
34 | return false; | ||
35 | } | ||
36 | } | ||
37 | if (changed) { | ||
38 | *addrp = addr; | ||
39 | *sizep = size; | ||
40 | } | ||
41 | return changed; | ||
42 | } | ||
43 | |||
44 | static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start, | ||
45 | u64 *sizep, u64 align) | ||
46 | { | ||
47 | u64 addr, last; | ||
48 | |||
49 | addr = round_up(ei_start, align); | ||
50 | if (addr < start) | ||
51 | addr = round_up(start, align); | ||
52 | if (addr >= ei_last) | ||
53 | goto out; | ||
54 | *sizep = ei_last - addr; | ||
55 | while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last) | ||
56 | ; | ||
57 | last = addr + *sizep; | ||
58 | if (last > ei_last) | ||
59 | goto out; | ||
60 | |||
61 | return addr; | ||
62 | |||
63 | out: | ||
64 | return MEMBLOCK_ERROR; | ||
65 | } | ||
66 | |||
67 | /* | ||
68 | * Find the next free range after start; its size is returned in *sizep | ||
69 | */ | ||
70 | u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) | ||
71 | { | ||
72 | struct memblock_region *r; | ||
73 | |||
74 | for_each_memblock(memory, r) { | ||
75 | u64 ei_start = r->base; | ||
76 | u64 ei_last = ei_start + r->size; | ||
77 | u64 addr; | ||
78 | |||
79 | addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start, | ||
80 | sizep, align); | ||
81 | |||
82 | if (addr != MEMBLOCK_ERROR) | ||
83 | return addr; | ||
84 | } | ||
85 | |||
86 | return MEMBLOCK_ERROR; | ||
87 | } | ||
88 | |||
89 | static __init struct range *find_range_array(int count) | ||
90 | { | ||
91 | u64 end, size, mem; | ||
92 | struct range *range; | ||
93 | |||
94 | size = sizeof(struct range) * count; | ||
95 | end = memblock.current_limit; | ||
96 | |||
97 | mem = memblock_find_in_range(0, end, size, sizeof(struct range)); | ||
98 | if (mem == MEMBLOCK_ERROR) | ||
99 | panic("can not find more space for range array"); | ||
100 | |||
101 | /* | ||
102 | * This range is temporary, so don't reserve it; it will not be | ||
103 | * overlapped, because we will not allocate a new buffer before | ||
104 | * we discard this one. | ||
105 | */ | ||
106 | range = __va(mem); | ||
107 | memset(range, 0, size); | ||
108 | |||
109 | return range; | ||
110 | } | ||
111 | |||
112 | static void __init memblock_x86_subtract_reserved(struct range *range, int az) | ||
113 | { | ||
114 | u64 final_start, final_end; | ||
115 | struct memblock_region *r; | ||
116 | |||
117 | /* Take out the region array itself first */ | ||
118 | memblock_free_reserved_regions(); | ||
119 | |||
120 | memblock_dbg("Subtract (%lu early reservations)\n", memblock.reserved.cnt); | ||
121 | |||
122 | for_each_memblock(reserved, r) { | ||
123 | memblock_dbg(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1); | ||
124 | final_start = PFN_DOWN(r->base); | ||
125 | final_end = PFN_UP(r->base + r->size); | ||
126 | if (final_start >= final_end) | ||
127 | continue; | ||
128 | subtract_range(range, az, final_start, final_end); | ||
129 | } | ||
130 | |||
131 | /* Put the region array itself back */ | ||
132 | memblock_reserve_reserved_regions(); | ||
133 | } | ||
134 | |||
135 | struct count_data { | ||
136 | int nr; | ||
137 | }; | ||
138 | |||
139 | static int __init count_work_fn(unsigned long start_pfn, | ||
140 | unsigned long end_pfn, void *datax) | ||
141 | { | ||
142 | struct count_data *data = datax; | ||
143 | |||
144 | data->nr++; | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static int __init count_early_node_map(int nodeid) | ||
150 | { | ||
151 | struct count_data data; | ||
152 | |||
153 | data.nr = 0; | ||
154 | work_with_active_regions(nodeid, count_work_fn, &data); | ||
155 | |||
156 | return data.nr; | ||
157 | } | ||
158 | |||
159 | int __init get_free_all_memory_range(struct range **rangep, int nodeid) | ||
160 | { | ||
161 | int count; | ||
162 | struct range *range; | ||
163 | int nr_range; | ||
164 | |||
165 | count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2; | ||
166 | |||
167 | range = find_range_array(count); | ||
168 | nr_range = 0; | ||
169 | |||
170 | /* | ||
171 | * Populate the range array from early_node_map[] first, then | ||
172 | * trim it with the memblock.reserved regions | ||
173 | */ | ||
174 | nr_range = add_from_early_node_map(range, count, nr_range, nodeid); | ||
175 | #ifdef CONFIG_X86_32 | ||
176 | subtract_range(range, count, max_low_pfn, -1ULL); | ||
177 | #endif | ||
178 | memblock_x86_subtract_reserved(range, count); | ||
179 | nr_range = clean_sort_range(range, count); | ||
180 | |||
181 | *rangep = range; | ||
182 | return nr_range; | ||
183 | } | ||
184 | |||
185 | static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free) | ||
186 | { | ||
187 | int i, count; | ||
188 | struct range *range; | ||
189 | int nr_range; | ||
190 | u64 final_start, final_end; | ||
191 | u64 free_size; | ||
192 | struct memblock_region *r; | ||
193 | |||
194 | count = (memblock.reserved.cnt + memblock.memory.cnt) * 2; | ||
195 | |||
196 | range = find_range_array(count); | ||
197 | nr_range = 0; | ||
198 | |||
199 | addr = PFN_UP(addr); | ||
200 | limit = PFN_DOWN(limit); | ||
201 | |||
202 | for_each_memblock(memory, r) { | ||
203 | final_start = PFN_UP(r->base); | ||
204 | final_end = PFN_DOWN(r->base + r->size); | ||
205 | if (final_start >= final_end) | ||
206 | continue; | ||
207 | if (final_start >= limit || final_end <= addr) | ||
208 | continue; | ||
209 | |||
210 | nr_range = add_range(range, count, nr_range, final_start, final_end); | ||
211 | } | ||
212 | subtract_range(range, count, 0, addr); | ||
213 | subtract_range(range, count, limit, -1ULL); | ||
214 | |||
215 | /* Subtract the memblock.reserved regions when only free memory is wanted */ | ||
216 | if (!get_free) | ||
217 | goto sort_and_count_them; | ||
218 | for_each_memblock(reserved, r) { | ||
219 | final_start = PFN_DOWN(r->base); | ||
220 | final_end = PFN_UP(r->base + r->size); | ||
221 | if (final_start >= final_end) | ||
222 | continue; | ||
223 | if (final_start >= limit || final_end <= addr) | ||
224 | continue; | ||
225 | |||
226 | subtract_range(range, count, final_start, final_end); | ||
227 | } | ||
228 | |||
229 | sort_and_count_them: | ||
230 | nr_range = clean_sort_range(range, count); | ||
231 | |||
232 | free_size = 0; | ||
233 | for (i = 0; i < nr_range; i++) | ||
234 | free_size += range[i].end - range[i].start; | ||
235 | |||
236 | return free_size << PAGE_SHIFT; | ||
237 | } | ||
238 | |||
239 | u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) | ||
240 | { | ||
241 | return __memblock_x86_memory_in_range(addr, limit, true); | ||
242 | } | ||
243 | |||
244 | u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit) | ||
245 | { | ||
246 | return __memblock_x86_memory_in_range(addr, limit, false); | ||
247 | } | ||
248 | |||
249 | void __init memblock_x86_reserve_range(u64 start, u64 end, char *name) | ||
250 | { | ||
251 | if (start == end) | ||
252 | return; | ||
253 | |||
254 | if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end)) | ||
255 | return; | ||
256 | |||
257 | memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name); | ||
258 | |||
259 | memblock_reserve(start, end - start); | ||
260 | } | ||
261 | |||
262 | void __init memblock_x86_free_range(u64 start, u64 end) | ||
263 | { | ||
264 | if (start == end) | ||
265 | return; | ||
266 | |||
267 | if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end)) | ||
268 | return; | ||
269 | |||
270 | memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1); | ||
271 | |||
272 | memblock_free(start, end - start); | ||
273 | } | ||
274 | |||
275 | /* | ||
276 | * This function must be called after memblock_x86_register_active_regions(), | ||
277 | * so that early_node_map[] is already filled. | ||
278 | */ | ||
279 | u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align) | ||
280 | { | ||
281 | u64 addr; | ||
282 | addr = find_memory_core_early(nid, size, align, start, end); | ||
283 | if (addr != MEMBLOCK_ERROR) | ||
284 | return addr; | ||
285 | |||
286 | /* Fallback: start and end should already be within the node's range */ | ||
287 | return memblock_find_in_range(start, end, size, align); | ||
288 | } | ||
289 | |||
290 | /* | ||
291 | * Finds an active region in the address range from start_pfn to last_pfn and | ||
292 | * returns its range in ei_startpfn and ei_endpfn for the memblock entry. | ||
293 | */ | ||
294 | static int __init memblock_x86_find_active_region(const struct memblock_region *ei, | ||
295 | unsigned long start_pfn, | ||
296 | unsigned long last_pfn, | ||
297 | unsigned long *ei_startpfn, | ||
298 | unsigned long *ei_endpfn) | ||
299 | { | ||
300 | u64 align = PAGE_SIZE; | ||
301 | |||
302 | *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT; | ||
303 | *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT; | ||
304 | |||
305 | /* Skip map entries smaller than a page */ | ||
306 | if (*ei_startpfn >= *ei_endpfn) | ||
307 | return 0; | ||
308 | |||
309 | /* Skip if map is outside the node */ | ||
310 | if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn) | ||
311 | return 0; | ||
312 | |||
313 | /* Check for overlaps */ | ||
314 | if (*ei_startpfn < start_pfn) | ||
315 | *ei_startpfn = start_pfn; | ||
316 | if (*ei_endpfn > last_pfn) | ||
317 | *ei_endpfn = last_pfn; | ||
318 | |||
319 | return 1; | ||
320 | } | ||
321 | |||
322 | /* Walk the memblock.memory map and register active regions within a node */ | ||
323 | void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn, | ||
324 | unsigned long last_pfn) | ||
325 | { | ||
326 | unsigned long ei_startpfn; | ||
327 | unsigned long ei_endpfn; | ||
328 | struct memblock_region *r; | ||
329 | |||
330 | for_each_memblock(memory, r) | ||
331 | if (memblock_x86_find_active_region(r, start_pfn, last_pfn, | ||
332 | &ei_startpfn, &ei_endpfn)) | ||
333 | add_active_range(nid, ei_startpfn, ei_endpfn); | ||
334 | } | ||
335 | |||
336 | /* | ||
337 | * Find the hole size (in bytes) in the memory range. | ||
338 | * @start: starting address of the memory range to scan | ||
339 | * @end: ending address of the memory range to scan | ||
340 | */ | ||
341 | u64 __init memblock_x86_hole_size(u64 start, u64 end) | ||
342 | { | ||
343 | unsigned long start_pfn = start >> PAGE_SHIFT; | ||
344 | unsigned long last_pfn = end >> PAGE_SHIFT; | ||
345 | unsigned long ei_startpfn, ei_endpfn, ram = 0; | ||
346 | struct memblock_region *r; | ||
347 | |||
348 | for_each_memblock(memory, r) | ||
349 | if (memblock_x86_find_active_region(r, start_pfn, last_pfn, | ||
350 | &ei_startpfn, &ei_endpfn)) | ||
351 | ram += ei_endpfn - ei_startpfn; | ||
352 | |||
353 | return end - start - ((u64)ram << PAGE_SHIFT); | ||
354 | } | ||
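The subtlest piece of the new file is bad_addr_size(): it trims a candidate [addr, addr+size) against each reserved region, restarting after every change, and when the candidate is wholly inside a reservation it bumps *sizep by one so that the caller's `addr + *sizep <= ei_last` bound fails and the fully-reserved range is rejected. A self-contained userspace sketch of the same trimming, using a toy reserved list and a reimplemented round_up (an illustration under those assumptions, not the kernel code):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct region { uint64_t base, size; };

/* Toy reserved list standing in for memblock.reserved. */
static const struct region reserved[] = {
	{ 0x1000, 0x1000 },	/* [0x1000, 0x2000) */
	{ 0x4000, 0x2000 },	/* [0x4000, 0x6000) */
};

static uint64_t round_up64(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Same shape as bad_addr_size() above: trim [addr, addr+size) against
 * every reserved region, restarting whenever something changed. */
static bool trim(uint64_t *addrp, uint64_t *sizep, uint64_t align)
{
	uint64_t addr = *addrp, size = *sizep, last;
	bool changed = false;
	size_t i;
again:
	last = addr + size;
	for (i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
		const struct region *r = &reserved[i];
		if (last > r->base && addr < r->base) {
			size = r->base - addr;	/* clip the tail */
			changed = true;
			goto again;
		}
		if (last > r->base + r->size && addr < r->base + r->size) {
			addr = round_up64(r->base + r->size, align);
			size = last - addr;	/* clip the head */
			changed = true;
			goto again;
		}
		if (last <= r->base + r->size && addr >= r->base) {
			(*sizep)++;	/* fully reserved: force the caller's bound check to fail */
			return false;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}

int main(void)
{
	uint64_t addr = 0x0, size = 0x3000;	/* overlaps [0x1000, 0x2000) */

	while (trim(&addr, &size, 0x1000))
		;
	printf("free chunk: [%#llx, %#llx)\n",	/* prints [0, 0x1000) */
	       (unsigned long long)addr, (unsigned long long)(addr + size));
	return 0;
}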
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 18d244f70205..92faf3a1c53e 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c | |||
@@ -6,8 +6,7 @@ | |||
6 | #include <linux/smp.h> | 6 | #include <linux/smp.h> |
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/pfn.h> | 8 | #include <linux/pfn.h> |
9 | 9 | #include <linux/memblock.h> | |
10 | #include <asm/e820.h> | ||
11 | 10 | ||
12 | static u64 patterns[] __initdata = { | 11 | static u64 patterns[] __initdata = { |
13 | 0, | 12 | 0, |
@@ -35,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad) | |||
35 | (unsigned long long) pattern, | 34 | (unsigned long long) pattern, |
36 | (unsigned long long) start_bad, | 35 | (unsigned long long) start_bad, |
37 | (unsigned long long) end_bad); | 36 | (unsigned long long) end_bad); |
38 | reserve_early(start_bad, end_bad, "BAD RAM"); | 37 | memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM"); |
39 | } | 38 | } |
40 | 39 | ||
41 | static void __init memtest(u64 pattern, u64 start_phys, u64 size) | 40 | static void __init memtest(u64 pattern, u64 start_phys, u64 size) |
@@ -74,7 +73,7 @@ static void __init do_one_pass(u64 pattern, u64 start, u64 end) | |||
74 | u64 size = 0; | 73 | u64 size = 0; |
75 | 74 | ||
76 | while (start < end) { | 75 | while (start < end) { |
77 | start = find_e820_area_size(start, &size, 1); | 76 | start = memblock_x86_find_in_range_size(start, &size, 1); |
78 | 77 | ||
79 | /* done ? */ | 78 | /* done ? */ |
80 | if (start >= end) | 79 | if (start >= end) |
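One level up from the memtest scan above, the new get_free_all_memory_range() builds a flat array of free ranges by punching every reservation out with kernel/range.c's subtract_range(). A self-contained userspace sketch of that subtraction, simplified under two assumptions carried over from the kernel's struct range handling: ranges are half-open intervals and a slot with end == 0 is empty:

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };	/* empty slot: end == 0 */

/* Simplified subtract_range(): remove [start, end) from every slot,
 * splitting a slot into two when the cut lands in its middle. */
static void subtract_range(struct range *r, int az, uint64_t start, uint64_t end)
{
	int i, j;

	for (j = 0; j < az; j++) {
		if (!r[j].end)
			continue;
		if (start <= r[j].start && end >= r[j].end) {
			r[j].start = r[j].end = 0;	/* fully covered */
		} else if (start <= r[j].start && end > r[j].start) {
			r[j].start = end;		/* clip the head */
		} else if (end >= r[j].end && start < r[j].end) {
			r[j].end = start;		/* clip the tail */
		} else if (start > r[j].start && end < r[j].end) {
			for (i = 0; i < az && r[i].end; i++)
				;			/* find a spare slot */
			if (i < az) {
				r[i].start = end;	/* upper half */
				r[i].end = r[j].end;
			}
			r[j].end = start;		/* lower half */
		}
	}
}

int main(void)
{
	struct range r[4] = { { 0x100, 0x400 } };
	int i;

	subtract_range(r, 4, 0x200, 0x300);	/* cuts the slot in two */
	for (i = 0; i < 4; i++)
		if (r[i].end)
			printf("[%#llx, %#llx)\n",	/* [0x100,0x200) [0x300,0x400) */
			       (unsigned long long)r[i].start,
			       (unsigned long long)r[i].end);
	return 0;
}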
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 809baaaf48b1..70ddeb75ba25 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/bootmem.h> | 26 | #include <linux/bootmem.h> |
27 | #include <linux/memblock.h> | ||
27 | #include <linux/mmzone.h> | 28 | #include <linux/mmzone.h> |
28 | #include <linux/highmem.h> | 29 | #include <linux/highmem.h> |
29 | #include <linux/initrd.h> | 30 | #include <linux/initrd.h> |
@@ -120,7 +121,7 @@ int __init get_memcfg_numa_flat(void) | |||
120 | 121 | ||
121 | node_start_pfn[0] = 0; | 122 | node_start_pfn[0] = 0; |
122 | node_end_pfn[0] = max_pfn; | 123 | node_end_pfn[0] = max_pfn; |
123 | e820_register_active_regions(0, 0, max_pfn); | 124 | memblock_x86_register_active_regions(0, 0, max_pfn); |
124 | memory_present(0, 0, max_pfn); | 125 | memory_present(0, 0, max_pfn); |
125 | node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn); | 126 | node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn); |
126 | 127 | ||
@@ -161,14 +162,14 @@ static void __init allocate_pgdat(int nid) | |||
161 | NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid]; | 162 | NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid]; |
162 | else { | 163 | else { |
163 | unsigned long pgdat_phys; | 164 | unsigned long pgdat_phys; |
164 | pgdat_phys = find_e820_area(min_low_pfn<<PAGE_SHIFT, | 165 | pgdat_phys = memblock_find_in_range(min_low_pfn<<PAGE_SHIFT, |
165 | max_pfn_mapped<<PAGE_SHIFT, | 166 | max_pfn_mapped<<PAGE_SHIFT, |
166 | sizeof(pg_data_t), | 167 | sizeof(pg_data_t), |
167 | PAGE_SIZE); | 168 | PAGE_SIZE); |
168 | NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT)); | 169 | NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT)); |
169 | memset(buf, 0, sizeof(buf)); | 170 | memset(buf, 0, sizeof(buf)); |
170 | sprintf(buf, "NODE_DATA %d", nid); | 171 | sprintf(buf, "NODE_DATA %d", nid); |
171 | reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf); | 172 | memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf); |
172 | } | 173 | } |
173 | printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n", | 174 | printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n", |
174 | nid, (unsigned long)NODE_DATA(nid)); | 175 | nid, (unsigned long)NODE_DATA(nid)); |
@@ -291,15 +292,15 @@ static __init unsigned long calculate_numa_remap_pages(void) | |||
291 | PTRS_PER_PTE); | 292 | PTRS_PER_PTE); |
292 | node_kva_target <<= PAGE_SHIFT; | 293 | node_kva_target <<= PAGE_SHIFT; |
293 | do { | 294 | do { |
294 | node_kva_final = find_e820_area(node_kva_target, | 295 | node_kva_final = memblock_find_in_range(node_kva_target, |
295 | ((u64)node_end_pfn[nid])<<PAGE_SHIFT, | 296 | ((u64)node_end_pfn[nid])<<PAGE_SHIFT, |
296 | ((u64)size)<<PAGE_SHIFT, | 297 | ((u64)size)<<PAGE_SHIFT, |
297 | LARGE_PAGE_BYTES); | 298 | LARGE_PAGE_BYTES); |
298 | node_kva_target -= LARGE_PAGE_BYTES; | 299 | node_kva_target -= LARGE_PAGE_BYTES; |
299 | } while (node_kva_final == -1ULL && | 300 | } while (node_kva_final == MEMBLOCK_ERROR && |
300 | (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid])); | 301 | (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid])); |
301 | 302 | ||
302 | if (node_kva_final == -1ULL) | 303 | if (node_kva_final == MEMBLOCK_ERROR) |
303 | panic("Can not get kva ram\n"); | 304 | panic("Can not get kva ram\n"); |
304 | 305 | ||
305 | node_remap_size[nid] = size; | 306 | node_remap_size[nid] = size; |
@@ -318,9 +319,9 @@ static __init unsigned long calculate_numa_remap_pages(void) | |||
318 | * but we could have some hole in high memory, and it will only | 319 | * but we could have some hole in high memory, and it will only |
319 | * check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide | 320 | * check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide |
320 | * to use it as free. | 321 | * to use it as free. |
321 | * So reserve_early here, hope we don't run out of that array | 322 | * So memblock_x86_reserve_range here, hope we don't run out of that array |
322 | */ | 323 | */ |
323 | reserve_early(node_kva_final, | 324 | memblock_x86_reserve_range(node_kva_final, |
324 | node_kva_final+(((u64)size)<<PAGE_SHIFT), | 325 | node_kva_final+(((u64)size)<<PAGE_SHIFT), |
325 | "KVA RAM"); | 326 | "KVA RAM"); |
326 | 327 | ||
@@ -367,14 +368,14 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, | |||
367 | 368 | ||
368 | kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); | 369 | kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); |
369 | do { | 370 | do { |
370 | kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT, | 371 | kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT, |
371 | max_low_pfn<<PAGE_SHIFT, | 372 | max_low_pfn<<PAGE_SHIFT, |
372 | kva_pages<<PAGE_SHIFT, | 373 | kva_pages<<PAGE_SHIFT, |
373 | PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT; | 374 | PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT; |
374 | kva_target_pfn -= PTRS_PER_PTE; | 375 | kva_target_pfn -= PTRS_PER_PTE; |
375 | } while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn); | 376 | } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn); |
376 | 377 | ||
377 | if (kva_start_pfn == -1UL) | 378 | if (kva_start_pfn == MEMBLOCK_ERROR) |
378 | panic("Can not get kva space\n"); | 379 | panic("Can not get kva space\n"); |
379 | 380 | ||
380 | printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n", | 381 | printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n", |
@@ -382,7 +383,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, | |||
382 | printk(KERN_INFO "max_pfn = %lx\n", max_pfn); | 383 | printk(KERN_INFO "max_pfn = %lx\n", max_pfn); |
383 | 384 | ||
384 | /* avoid clash with initrd */ | 385 | /* avoid clash with initrd */ |
385 | reserve_early(kva_start_pfn<<PAGE_SHIFT, | 386 | memblock_x86_reserve_range(kva_start_pfn<<PAGE_SHIFT, |
386 | (kva_start_pfn + kva_pages)<<PAGE_SHIFT, | 387 | (kva_start_pfn + kva_pages)<<PAGE_SHIFT, |
387 | "KVA PG"); | 388 | "KVA PG"); |
388 | #ifdef CONFIG_HIGHMEM | 389 | #ifdef CONFIG_HIGHMEM |
@@ -419,9 +420,6 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, | |||
419 | for_each_online_node(nid) { | 420 | for_each_online_node(nid) { |
420 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); | 421 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); |
421 | NODE_DATA(nid)->node_id = nid; | 422 | NODE_DATA(nid)->node_id = nid; |
422 | #ifndef CONFIG_NO_BOOTMEM | ||
423 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; | ||
424 | #endif | ||
425 | } | 423 | } |
426 | 424 | ||
427 | setup_bootmem_allocator(); | 425 | setup_bootmem_allocator(); |
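Both numa_32 conversions above keep the same search idiom: try to place a block near a high preferred target, and on failure slide the target down one step, widening the search window, until the floor is reached. Condensed below with placeholder names (preferred_top, limit, step, floor_pfn are stand-ins, not patch identifiers; the error sentinel is spelled as in the patch):

	u64 found, target = preferred_top;

	do {
		found = memblock_find_in_range(target, limit, size, align);
		target -= step;		/* widen the window downward */
	} while (found == MEMBLOCK_ERROR && (target >> PAGE_SHIFT) > floor_pfn);

	if (found == MEMBLOCK_ERROR)
		panic("Can not get kva space\n");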
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index a7bcc23ef96c..aef0ff74f7dd 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/string.h> | 7 | #include <linux/string.h> |
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/bootmem.h> | 9 | #include <linux/bootmem.h> |
10 | #include <linux/memblock.h> | ||
10 | #include <linux/mmzone.h> | 11 | #include <linux/mmzone.h> |
11 | #include <linux/ctype.h> | 12 | #include <linux/ctype.h> |
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
@@ -86,16 +87,16 @@ static int __init allocate_cachealigned_memnodemap(void) | |||
86 | 87 | ||
87 | addr = 0x8000; | 88 | addr = 0x8000; |
88 | nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); | 89 | nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); |
89 | nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, | 90 | nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT, |
90 | nodemap_size, L1_CACHE_BYTES); | 91 | nodemap_size, L1_CACHE_BYTES); |
91 | if (nodemap_addr == -1UL) { | 92 | if (nodemap_addr == MEMBLOCK_ERROR) { |
92 | printk(KERN_ERR | 93 | printk(KERN_ERR |
93 | "NUMA: Unable to allocate Memory to Node hash map\n"); | 94 | "NUMA: Unable to allocate Memory to Node hash map\n"); |
94 | nodemap_addr = nodemap_size = 0; | 95 | nodemap_addr = nodemap_size = 0; |
95 | return -1; | 96 | return -1; |
96 | } | 97 | } |
97 | memnodemap = phys_to_virt(nodemap_addr); | 98 | memnodemap = phys_to_virt(nodemap_addr); |
98 | reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP"); | 99 | memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP"); |
99 | 100 | ||
100 | printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n", | 101 | printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n", |
101 | nodemap_addr, nodemap_addr + nodemap_size); | 102 | nodemap_addr, nodemap_addr + nodemap_size); |
@@ -171,8 +172,8 @@ static void * __init early_node_mem(int nodeid, unsigned long start, | |||
171 | if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) && | 172 | if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) && |
172 | end > (MAX_DMA32_PFN<<PAGE_SHIFT)) | 173 | end > (MAX_DMA32_PFN<<PAGE_SHIFT)) |
173 | start = MAX_DMA32_PFN<<PAGE_SHIFT; | 174 | start = MAX_DMA32_PFN<<PAGE_SHIFT; |
174 | mem = find_e820_area(start, end, size, align); | 175 | mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align); |
175 | if (mem != -1L) | 176 | if (mem != MEMBLOCK_ERROR) |
176 | return __va(mem); | 177 | return __va(mem); |
177 | 178 | ||
178 | /* extend the search scope */ | 179 | /* extend the search scope */ |
@@ -181,8 +182,8 @@ static void * __init early_node_mem(int nodeid, unsigned long start, | |||
181 | start = MAX_DMA32_PFN<<PAGE_SHIFT; | 182 | start = MAX_DMA32_PFN<<PAGE_SHIFT; |
182 | else | 183 | else |
183 | start = MAX_DMA_PFN<<PAGE_SHIFT; | 184 | start = MAX_DMA_PFN<<PAGE_SHIFT; |
184 | mem = find_e820_area(start, end, size, align); | 185 | mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align); |
185 | if (mem != -1L) | 186 | if (mem != MEMBLOCK_ERROR) |
186 | return __va(mem); | 187 | return __va(mem); |
187 | 188 | ||
188 | printk(KERN_ERR "Cannot find %lu bytes in node %d\n", | 189 | printk(KERN_ERR "Cannot find %lu bytes in node %d\n", |
@@ -198,10 +199,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) | |||
198 | unsigned long start_pfn, last_pfn, nodedata_phys; | 199 | unsigned long start_pfn, last_pfn, nodedata_phys; |
199 | const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); | 200 | const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); |
200 | int nid; | 201 | int nid; |
201 | #ifndef CONFIG_NO_BOOTMEM | ||
202 | unsigned long bootmap_start, bootmap_pages, bootmap_size; | ||
203 | void *bootmap; | ||
204 | #endif | ||
205 | 202 | ||
206 | if (!end) | 203 | if (!end) |
207 | return; | 204 | return; |
@@ -226,7 +223,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) | |||
226 | if (node_data[nodeid] == NULL) | 223 | if (node_data[nodeid] == NULL) |
227 | return; | 224 | return; |
228 | nodedata_phys = __pa(node_data[nodeid]); | 225 | nodedata_phys = __pa(node_data[nodeid]); |
229 | reserve_early(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA"); | 226 | memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA"); |
230 | printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys, | 227 | printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys, |
231 | nodedata_phys + pgdat_size - 1); | 228 | nodedata_phys + pgdat_size - 1); |
232 | nid = phys_to_nid(nodedata_phys); | 229 | nid = phys_to_nid(nodedata_phys); |
@@ -238,47 +235,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) | |||
238 | NODE_DATA(nodeid)->node_start_pfn = start_pfn; | 235 | NODE_DATA(nodeid)->node_start_pfn = start_pfn; |
239 | NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn; | 236 | NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn; |
240 | 237 | ||
241 | #ifndef CONFIG_NO_BOOTMEM | ||
242 | NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid]; | ||
243 | |||
244 | /* | ||
245 | * Find a place for the bootmem map | ||
246 | * nodedata_phys could be on other nodes by alloc_bootmem, | ||
247 | * so need to sure bootmap_start not to be small, otherwise | ||
248 | * early_node_mem will get that with find_e820_area instead | ||
249 | * of alloc_bootmem, that could clash with reserved range | ||
250 | */ | ||
251 | bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); | ||
252 | bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE); | ||
253 | /* | ||
254 | * SMP_CACHE_BYTES could be enough, but init_bootmem_node like | ||
255 | * to use that to align to PAGE_SIZE | ||
256 | */ | ||
257 | bootmap = early_node_mem(nodeid, bootmap_start, end, | ||
258 | bootmap_pages<<PAGE_SHIFT, PAGE_SIZE); | ||
259 | if (bootmap == NULL) { | ||
260 | free_early(nodedata_phys, nodedata_phys + pgdat_size); | ||
261 | node_data[nodeid] = NULL; | ||
262 | return; | ||
263 | } | ||
264 | bootmap_start = __pa(bootmap); | ||
265 | reserve_early(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT), | ||
266 | "BOOTMAP"); | ||
267 | |||
268 | bootmap_size = init_bootmem_node(NODE_DATA(nodeid), | ||
269 | bootmap_start >> PAGE_SHIFT, | ||
270 | start_pfn, last_pfn); | ||
271 | |||
272 | printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n", | ||
273 | bootmap_start, bootmap_start + bootmap_size - 1, | ||
274 | bootmap_pages); | ||
275 | nid = phys_to_nid(bootmap_start); | ||
276 | if (nid != nodeid) | ||
277 | printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid); | ||
278 | |||
279 | free_bootmem_with_active_regions(nodeid, end); | ||
280 | #endif | ||
281 | |||
282 | node_set_online(nodeid); | 238 | node_set_online(nodeid); |
283 | } | 239 | } |
284 | 240 | ||
@@ -416,7 +372,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, | |||
416 | nr_nodes = MAX_NUMNODES; | 372 | nr_nodes = MAX_NUMNODES; |
417 | } | 373 | } |
418 | 374 | ||
419 | size = (max_addr - addr - e820_hole_size(addr, max_addr)) / nr_nodes; | 375 | size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes; |
420 | /* | 376 | /* |
421 | * Calculate the number of big nodes that can be allocated as a result | 377 | * Calculate the number of big nodes that can be allocated as a result |
422 | * of consolidating the remainder. | 378 | * of consolidating the remainder. |
@@ -452,7 +408,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, | |||
452 | * non-reserved memory is less than the per-node size. | 408 | * non-reserved memory is less than the per-node size. |
453 | */ | 409 | */ |
454 | while (end - physnodes[i].start - | 410 | while (end - physnodes[i].start - |
455 | e820_hole_size(physnodes[i].start, end) < size) { | 411 | memblock_x86_hole_size(physnodes[i].start, end) < size) { |
456 | end += FAKE_NODE_MIN_SIZE; | 412 | end += FAKE_NODE_MIN_SIZE; |
457 | if (end > physnodes[i].end) { | 413 | if (end > physnodes[i].end) { |
458 | end = physnodes[i].end; | 414 | end = physnodes[i].end; |
@@ -466,7 +422,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, | |||
466 | * this one must extend to the boundary. | 422 | * this one must extend to the boundary. |
467 | */ | 423 | */ |
468 | if (end < dma32_end && dma32_end - end - | 424 | if (end < dma32_end && dma32_end - end - |
469 | e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) | 425 | memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) |
470 | end = dma32_end; | 426 | end = dma32_end; |
471 | 427 | ||
472 | /* | 428 | /* |
@@ -475,7 +431,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, | |||
475 | * physical node. | 431 | * physical node. |
476 | */ | 432 | */ |
477 | if (physnodes[i].end - end - | 433 | if (physnodes[i].end - end - |
478 | e820_hole_size(end, physnodes[i].end) < size) | 434 | memblock_x86_hole_size(end, physnodes[i].end) < size) |
479 | end = physnodes[i].end; | 435 | end = physnodes[i].end; |
480 | 436 | ||
481 | /* | 437 | /* |
@@ -503,7 +459,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) | |||
503 | { | 459 | { |
504 | u64 end = start + size; | 460 | u64 end = start + size; |
505 | 461 | ||
506 | while (end - start - e820_hole_size(start, end) < size) { | 462 | while (end - start - memblock_x86_hole_size(start, end) < size) { |
507 | end += FAKE_NODE_MIN_SIZE; | 463 | end += FAKE_NODE_MIN_SIZE; |
508 | if (end > max_addr) { | 464 | if (end > max_addr) { |
509 | end = max_addr; | 465 | end = max_addr; |
@@ -532,7 +488,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) | |||
532 | * creates a uniform distribution of node sizes across the entire | 488 | * creates a uniform distribution of node sizes across the entire |
533 | * machine (but not necessarily over physical nodes). | 489 | * machine (but not necessarily over physical nodes). |
534 | */ | 490 | */ |
535 | min_size = (max_addr - addr - e820_hole_size(addr, max_addr)) / | 491 | min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / |
536 | MAX_NUMNODES; | 492 | MAX_NUMNODES; |
537 | min_size = max(min_size, FAKE_NODE_MIN_SIZE); | 493 | min_size = max(min_size, FAKE_NODE_MIN_SIZE); |
538 | if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) | 494 | if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) |
@@ -565,7 +521,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) | |||
565 | * this one must extend to the boundary. | 521 | * this one must extend to the boundary. |
566 | */ | 522 | */ |
567 | if (end < dma32_end && dma32_end - end - | 523 | if (end < dma32_end && dma32_end - end - |
568 | e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) | 524 | memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) |
569 | end = dma32_end; | 525 | end = dma32_end; |
570 | 526 | ||
571 | /* | 527 | /* |
@@ -574,7 +530,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) | |||
574 | * physical node. | 530 | * physical node. |
575 | */ | 531 | */ |
576 | if (physnodes[i].end - end - | 532 | if (physnodes[i].end - end - |
577 | e820_hole_size(end, physnodes[i].end) < size) | 533 | memblock_x86_hole_size(end, physnodes[i].end) < size) |
578 | end = physnodes[i].end; | 534 | end = physnodes[i].end; |
579 | 535 | ||
580 | /* | 536 | /* |
@@ -638,7 +594,7 @@ static int __init numa_emulation(unsigned long start_pfn, | |||
638 | */ | 594 | */ |
639 | remove_all_active_ranges(); | 595 | remove_all_active_ranges(); |
640 | for_each_node_mask(i, node_possible_map) { | 596 | for_each_node_mask(i, node_possible_map) { |
641 | e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, | 597 | memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, |
642 | nodes[i].end >> PAGE_SHIFT); | 598 | nodes[i].end >> PAGE_SHIFT); |
643 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); | 599 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); |
644 | } | 600 | } |
@@ -691,7 +647,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, | |||
691 | node_set(0, node_possible_map); | 647 | node_set(0, node_possible_map); |
692 | for (i = 0; i < nr_cpu_ids; i++) | 648 | for (i = 0; i < nr_cpu_ids; i++) |
693 | numa_set_node(i, 0); | 649 | numa_set_node(i, 0); |
694 | e820_register_active_regions(0, start_pfn, last_pfn); | 650 | memblock_x86_register_active_regions(0, start_pfn, last_pfn); |
695 | setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); | 651 | setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); |
696 | } | 652 | } |
697 | 653 | ||
@@ -703,9 +659,7 @@ unsigned long __init numa_free_all_bootmem(void) | |||
703 | for_each_online_node(i) | 659 | for_each_online_node(i) |
704 | pages += free_all_bootmem_node(NODE_DATA(i)); | 660 | pages += free_all_bootmem_node(NODE_DATA(i)); |
705 | 661 | ||
706 | #ifdef CONFIG_NO_BOOTMEM | ||
707 | pages += free_all_memory_core_early(MAX_NUMNODES); | 662 | pages += free_all_memory_core_early(MAX_NUMNODES); |
708 | #endif | ||
709 | 663 | ||
710 | return pages; | 664 | return pages; |
711 | } | 665 | } |
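The numa_64 allocator now asks for node-local memory first: memblock_x86_find_in_range_node(), defined earlier in this patch, consults early_node_map[] via find_memory_core_early() and only then falls back to a global memblock_find_in_range(). A condensed paraphrase of the pattern in early_node_mem() above, including its DMA32 dodge (a restatement of the hunk, not new behavior):

	/* Don't let a node that straddles 4 GB eat the scarce DMA32 range. */
	if (start < (MAX_DMA32_PFN << PAGE_SHIFT) &&
	    end > (MAX_DMA32_PFN << PAGE_SHIFT))
		start = MAX_DMA32_PFN << PAGE_SHIFT;

	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);	/* early memory is direct-mapped */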
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c index 9324f13492d5..a17dffd136c1 100644 --- a/arch/x86/mm/srat_32.c +++ b/arch/x86/mm/srat_32.c | |||
@@ -25,6 +25,7 @@ | |||
25 | */ | 25 | */ |
26 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
27 | #include <linux/bootmem.h> | 27 | #include <linux/bootmem.h> |
28 | #include <linux/memblock.h> | ||
28 | #include <linux/mmzone.h> | 29 | #include <linux/mmzone.h> |
29 | #include <linux/acpi.h> | 30 | #include <linux/acpi.h> |
30 | #include <linux/nodemask.h> | 31 | #include <linux/nodemask.h> |
@@ -264,7 +265,7 @@ int __init get_memcfg_from_srat(void) | |||
264 | if (node_read_chunk(chunk->nid, chunk)) | 265 | if (node_read_chunk(chunk->nid, chunk)) |
265 | continue; | 266 | continue; |
266 | 267 | ||
267 | e820_register_active_regions(chunk->nid, chunk->start_pfn, | 268 | memblock_x86_register_active_regions(chunk->nid, chunk->start_pfn, |
268 | min(chunk->end_pfn, max_pfn)); | 269 | min(chunk->end_pfn, max_pfn)); |
269 | } | 270 | } |
270 | /* for out of order entries in SRAT */ | 271 | /* for out of order entries in SRAT */ |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index f9897f7a9ef1..7f44eb62a5e9 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/topology.h> | 17 | #include <linux/topology.h> |
18 | #include <linux/bootmem.h> | 18 | #include <linux/bootmem.h> |
19 | #include <linux/memblock.h> | ||
19 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
20 | #include <asm/proto.h> | 21 | #include <asm/proto.h> |
21 | #include <asm/numa.h> | 22 | #include <asm/numa.h> |
@@ -98,15 +99,15 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) | |||
98 | unsigned long phys; | 99 | unsigned long phys; |
99 | 100 | ||
100 | length = slit->header.length; | 101 | length = slit->header.length; |
101 | phys = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, length, | 102 | phys = memblock_find_in_range(0, max_pfn_mapped<<PAGE_SHIFT, length, |
102 | PAGE_SIZE); | 103 | PAGE_SIZE); |
103 | 104 | ||
104 | if (phys == -1L) | 105 | if (phys == MEMBLOCK_ERROR) |
105 | panic(" Can not save slit!\n"); | 106 | panic(" Can not save slit!\n"); |
106 | 107 | ||
107 | acpi_slit = __va(phys); | 108 | acpi_slit = __va(phys); |
108 | memcpy(acpi_slit, slit, length); | 109 | memcpy(acpi_slit, slit, length); |
109 | reserve_early(phys, phys + length, "ACPI SLIT"); | 110 | memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT"); |
110 | } | 111 | } |
111 | 112 | ||
112 | /* Callback for Proximity Domain -> x2APIC mapping */ | 113 | /* Callback for Proximity Domain -> x2APIC mapping */ |
@@ -324,7 +325,7 @@ static int __init nodes_cover_memory(const struct bootnode *nodes) | |||
324 | pxmram = 0; | 325 | pxmram = 0; |
325 | } | 326 | } |
326 | 327 | ||
327 | e820ram = max_pfn - (e820_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT); | 328 | e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT); |
328 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ | 329 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ |
329 | if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) { | 330 | if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) { |
330 | printk(KERN_ERR | 331 | printk(KERN_ERR |
@@ -421,7 +422,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) | |||
421 | } | 422 | } |
422 | 423 | ||
423 | for_each_node_mask(i, nodes_parsed) | 424 | for_each_node_mask(i, nodes_parsed) |
424 | e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, | 425 | memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, |
425 | nodes[i].end >> PAGE_SHIFT); | 426 | nodes[i].end >> PAGE_SHIFT); |
426 | /* for out of order entries in SRAT */ | 427 | /* for out of order entries in SRAT */ |
427 | sort_node_map(); | 428 | sort_node_map(); |
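nodes_cover_memory() above sanity-checks SRAT by comparing per-PXM RAM against max_pfn minus the hole size. A runnable toy version of memblock_x86_hole_size()'s arithmetic over an invented, page-aligned two-region map (so the real helper's per-region round_up/round_down is a no-op here):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

struct region { uint64_t base, size; };

/* Invented memory map: 0-640 KB and 1 MB-1 GB, i.e. the classic
 * 384 KB hole below 1 MB. */
static const struct region memory[] = {
	{ 0x0,      0xa0000 },
	{ 0x100000, 0x40000000 - 0x100000 },
};

/* Same arithmetic as memblock_x86_hole_size(): clamp each region to
 * [start, end), count the RAM pages, subtract from the span. */
static uint64_t hole_size(uint64_t start, uint64_t end)
{
	uint64_t start_pfn = start >> PAGE_SHIFT;
	uint64_t last_pfn = end >> PAGE_SHIFT;
	uint64_t ram = 0;
	size_t i;

	for (i = 0; i < sizeof(memory) / sizeof(memory[0]); i++) {
		uint64_t s = memory[i].base >> PAGE_SHIFT;
		uint64_t e = (memory[i].base + memory[i].size) >> PAGE_SHIFT;

		if (s < start_pfn)
			s = start_pfn;
		if (e > last_pfn)
			e = last_pfn;
		if (s < e)
			ram += e - s;
	}
	return end - start - (ram << PAGE_SHIFT);
}

int main(void)
{
	printf("hole below 1 GB: %#llx bytes\n",	/* prints 0x60000 (384 KB) */
	       (unsigned long long)hole_size(0, 0x40000000));
	return 0;
}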
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 42086ac406af..4fe04ac0bae0 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/vmalloc.h> | 45 | #include <linux/vmalloc.h> |
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/gfp.h> | 47 | #include <linux/gfp.h> |
48 | #include <linux/memblock.h> | ||
48 | 49 | ||
49 | #include <asm/pgtable.h> | 50 | #include <asm/pgtable.h> |
50 | #include <asm/tlbflush.h> | 51 | #include <asm/tlbflush.h> |
@@ -1814,7 +1815,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | |||
1814 | __xen_write_cr3(true, __pa(pgd)); | 1815 | __xen_write_cr3(true, __pa(pgd)); |
1815 | xen_mc_issue(PARAVIRT_LAZY_CPU); | 1816 | xen_mc_issue(PARAVIRT_LAZY_CPU); |
1816 | 1817 | ||
1817 | reserve_early(__pa(xen_start_info->pt_base), | 1818 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), |
1818 | __pa(xen_start_info->pt_base + | 1819 | __pa(xen_start_info->pt_base + |
1819 | xen_start_info->nr_pt_frames * PAGE_SIZE), | 1820 | xen_start_info->nr_pt_frames * PAGE_SIZE), |
1820 | "XEN PAGETABLES"); | 1821 | "XEN PAGETABLES"); |
@@ -1852,7 +1853,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | |||
1852 | 1853 | ||
1853 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); | 1854 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); |
1854 | 1855 | ||
1855 | reserve_early(__pa(xen_start_info->pt_base), | 1856 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), |
1856 | __pa(xen_start_info->pt_base + | 1857 | __pa(xen_start_info->pt_base + |
1857 | xen_start_info->nr_pt_frames * PAGE_SIZE), | 1858 | xen_start_info->nr_pt_frames * PAGE_SIZE), |
1858 | "XEN PAGETABLES"); | 1859 | "XEN PAGETABLES"); |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 328b00305426..9729c903404b 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/pm.h> | 10 | #include <linux/pm.h> |
11 | #include <linux/memblock.h> | ||
11 | 12 | ||
12 | #include <asm/elf.h> | 13 | #include <asm/elf.h> |
13 | #include <asm/vdso.h> | 14 | #include <asm/vdso.h> |
@@ -129,7 +130,7 @@ char * __init xen_memory_setup(void) | |||
129 | * - xen_start_info | 130 | * - xen_start_info |
130 | * See comment above "struct start_info" in <xen/interface/xen.h> | 131 | * See comment above "struct start_info" in <xen/interface/xen.h> |
131 | */ | 132 | */ |
132 | reserve_early(__pa(xen_start_info->mfn_list), | 133 | memblock_x86_reserve_range(__pa(xen_start_info->mfn_list), |
133 | __pa(xen_start_info->pt_base), | 134 | __pa(xen_start_info->pt_base), |
134 | "XEN START INFO"); | 135 | "XEN START INFO"); |
135 | 136 | ||
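Taken together, the call-site conversions in this section follow a fixed dictionary (pairs as they appear in the hunks above; find_e820_area() maps to the node-aware helper only where the caller has a node id in hand):

	e820_register_active_regions()  ->  memblock_x86_register_active_regions()
	find_e820_area()                ->  memblock_find_in_range() or
	                                    memblock_x86_find_in_range_node()
	find_e820_area_size()           ->  memblock_x86_find_in_range_size()
	e820_hole_size()                ->  memblock_x86_hole_size()
	reserve_early()                 ->  memblock_x86_reserve_range()
	-1L / -1UL / -1ULL sentinels    ->  MEMBLOCK_ERROR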