Diffstat (limited to 'arch/microblaze/mm/init.c')
-rw-r--r--  arch/microblaze/mm/init.c | 64
1 file changed, 6 insertions, 58 deletions
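For orientation before the diff: this change drops microblaze's private bootmem bookkeeping (init_bootmem_done, the bootmem bitmap, the reserve_bootmem() loop) and leaves memblock as the only early allocator. The C sketch below is illustrative only and not part of the commit; it restates the memblock pattern kept in the patched early_get_page(). The function name early_page_sketch() is hypothetical, and it assumes the era's memblock_alloc_base() API plus the microblaze globals memory_start and kernel_tlb that init.c already uses.

#include <linux/memblock.h>	/* memblock_alloc_base() */
#include <asm/page.h>		/* PAGE_SIZE, __va() */
#include <asm/setup.h>		/* memory_start, kernel_tlb (microblaze) */

/*
 * Illustrative sketch, not part of this commit: grab one page-aligned
 * page of early memory from memblock, constrained to lie below
 * memory_start + kernel_tlb (the region head.S has already mapped),
 * and hand back its kernel virtual address.
 */
static void *early_page_sketch(void)
{
	phys_addr_t pa;

	/* memblock_alloc_base() panics if nothing fits below the limit */
	pa = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
				 memory_start + kernel_tlb);
	return __va(pa);
}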
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 434639f9a3a6..df6de7ccdc2e 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -32,9 +32,6 @@ int mem_init_done;
 #ifndef CONFIG_MMU
 unsigned int __page_offset;
 EXPORT_SYMBOL(__page_offset);
-
-#else
-static int init_bootmem_done;
 #endif /* CONFIG_MMU */
 
 char *klimit = _end;
@@ -117,7 +114,6 @@ static void __init paging_init(void)
 
 void __init setup_memory(void)
 {
-	unsigned long map_size;
 	struct memblock_region *reg;
 
 #ifndef CONFIG_MMU
@@ -174,17 +170,6 @@ void __init setup_memory(void)
 	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
 	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
 
-	/*
-	 * Find an area to use for the bootmem bitmap.
-	 * We look for the first area which is at least
-	 * 128kB in length (128kB is enough for a bitmap
-	 * for 4GB of memory, using 4kB pages), plus 1 page
-	 * (in case the address isn't page-aligned).
-	 */
-	map_size = init_bootmem_node(NODE_DATA(0),
-		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
-	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
-
 	/* Add active regions with valid PFNs */
 	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
@@ -196,32 +181,9 @@ void __init setup_memory(void)
 				  &memblock.memory, 0);
 	}
 
-	/* free bootmem is whole main memory */
-	free_bootmem_with_active_regions(0, max_low_pfn);
-
-	/* reserve allocate blocks */
-	for_each_memblock(reserved, reg) {
-		unsigned long top = reg->base + reg->size - 1;
-
-		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
-			 (u32) reg->base, (u32) reg->size, top,
-			 memory_start + lowmem_size - 1);
-
-		if (top <= (memory_start + lowmem_size - 1)) {
-			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
-		} else if (reg->base < (memory_start + lowmem_size - 1)) {
-			unsigned long trunc_size = memory_start + lowmem_size -
-				reg->base;
-			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
-		}
-	}
-
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
 
-#ifdef CONFIG_MMU
-	init_bootmem_done = 1;
-#endif
 	paging_init();
 }
 
@@ -398,30 +360,16 @@ asmlinkage void __init mmu_init(void)
 /* This is only called until mem_init is done. */
 void __init *early_get_page(void)
 {
-	void *p;
-	if (init_bootmem_done) {
-		p = alloc_bootmem_pages(PAGE_SIZE);
-	} else {
-		/*
-		 * Mem start + kernel_tlb -> here is limit
-		 * because of mem mapping from head.S
-		 */
-		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-						memory_start + kernel_tlb));
-	}
-	return p;
+	/*
+	 * Mem start + kernel_tlb -> here is limit
+	 * because of mem mapping from head.S
+	 */
+	return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+				memory_start + kernel_tlb));
 }
 
 #endif /* CONFIG_MMU */
 
-void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask)
-{
-	if (mem_init_done)
-		return kmalloc(size, mask);
-	else
-		return alloc_bootmem(size);
-}
-
 void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;