author		Paul Mundt <lethal@linux-sh.org>	2010-05-11 00:32:19 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2010-05-11 00:32:19 -0400
commit		4bc277ac9cae60e11fe2e557e4ea4acb56d3dc9a (patch)
tree		c9ed42c605674b2e41d8c962ad25c021fd518dbc /arch/sh/kernel/setup.c
parent		5e2ff328c0668794ff408a4632f5b8a62827571f (diff)
sh: bootmem refactoring.
This reworks much of the bootmem setup and initialization code, allowing
us to get rid of the work duplicated between the NUMA and non-NUMA cases.
The end result is a much more flexible, entirely LMB-backed interface for
supporting more complex topologies (fake NUMA, highmem, and so on). This
is an incremental step towards further NUMA work, as well as towards
migrating off of bootmem entirely.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
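
Background on the pattern this moves towards: LMB ("logical memory block",
later renamed memblock) keeps two tables of physical ranges, lmb.memory and
lmb.reserved, which platform code fills in early. Everything else (active
regions, bootmem reservations) can then be derived by walking those tables
instead of open-coding per-platform PFN arithmetic. Below is a minimal
sketch of that pattern, assembled from the loops in the do_init_bootmem()
removed in this patch; the wrapper function is invented for illustration
and assumes the 2.6.34-era <linux/lmb.h> API:

/* Illustrative sketch only: derive bootmem state from the LMB tables. */
static void __init lmb_backed_bootmem_sketch(void)
{
	int i;

	/* Every range that platform code registered via lmb_add()
	 * becomes an active region; no open-coded start/end PFNs. */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;

		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* Reservations (kernel text, initrd, crash kernel, ...) are
	 * replayed from lmb.reserved in exactly the same way. */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i),
				BOOTMEM_DEFAULT);
}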
Diffstat (limited to 'arch/sh/kernel/setup.c')
-rw-r--r--	arch/sh/kernel/setup.c	143
1 file changed, 4 insertions(+), 139 deletions(-)
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index f6a2db12ad78..61404ed01449 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -114,31 +114,7 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(void)
-{
-	unsigned long curr_pfn, last_pfn, pages;
-
-	/*
-	 * We are rounding up the start address of usable memory:
-	 */
-	curr_pfn = PFN_UP(__MEMORY_START);
-
-	/*
-	 * ... and at the end of the usable range downwards:
-	 */
-	last_pfn = PFN_DOWN(__pa(memory_end));
-
-	if (last_pfn > max_low_pfn)
-		last_pfn = max_low_pfn;
-
-	pages = last_pfn - curr_pfn;
-	free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
-}
-
-static void __init check_for_initrd(void)
+void __init check_for_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
 	unsigned long start, end;
@@ -240,85 +216,6 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 	add_active_range(nid, start_pfn, end_pfn);
 }
 
-void __init do_init_bootmem(void)
-{
-	unsigned long bootmap_size;
-	unsigned long bootmap_pages, bootmem_paddr;
-	u64 total_pages = lmb_phys_mem_size() >> PAGE_SHIFT;
-	int i;
-
-	bootmap_pages = bootmem_bootmap_pages(total_pages);
-
-	bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-
-	/*
-	 * Find a proper area for the bootmem bitmap. After this
-	 * bootstrap step all allocations (until the page allocator
-	 * is intact) must be done via bootmem_alloc().
-	 */
-	bootmap_size = init_bootmem_node(NODE_DATA(0),
-					 bootmem_paddr >> PAGE_SHIFT,
-					 min_low_pfn, max_low_pfn);
-
-	/* Add active regions with valid PFNs. */
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		unsigned long start_pfn, end_pfn;
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
-		__add_active_range(0, start_pfn, end_pfn);
-	}
-
-	/*
-	 * Add all physical memory to the bootmem map and mark each
-	 * area as present.
-	 */
-	register_bootmem_low_pages();
-
-	/* Reserve the sections we're already using. */
-	for (i = 0; i < lmb.reserved.cnt; i++)
-		reserve_bootmem(lmb.reserved.region[i].base,
-				lmb_size_bytes(&lmb.reserved, i),
-				BOOTMEM_DEFAULT);
-
-	node_set_online(0);
-
-	sparse_memory_present_with_active_regions(0);
-}
-
-static void __init early_reserve_mem(void)
-{
-	unsigned long start_pfn;
-
-	/*
-	 * Partially used pages are not usable - thus
-	 * we are rounding upwards:
-	 */
-	start_pfn = PFN_UP(__pa(_end));
-
-	/*
-	 * Reserve the kernel text and
-	 * Reserve the bootmem bitmap. We do this in two steps (first step
-	 * was init_bootmem()), because this catches the (definitely buggy)
-	 * case of us accidentally initializing the bootmem allocator with
-	 * an invalid RAM area.
-	 */
-	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
-		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
-		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
-
-	/*
-	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
-	 */
-	if (CONFIG_ZERO_PAGE_OFFSET != 0)
-		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
-
-	/*
-	 * Handle additional early reservations
-	 */
-	check_for_initrd();
-	reserve_crashkernel();
-}
-
 /*
  * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
  * is_kdump_kernel() to determine if we are booting after a panic. Hence
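
The size expression in the removed early_reserve_mem() above is easy to
misread, so a worked example may help. With illustrative values (these are
assumptions, not from any particular board): PAGE_SIZE = 4 KiB,
__MEMORY_START = 0x0c000000, CONFIG_ZERO_PAGE_OFFSET = 0x1000, and _end at
physical 0x0c2f3456:

	start_pfn = PFN_UP(0x0c2f3456)                  = 0x0c2f4
	base      = 0x0c000000 + 0x1000                 = 0x0c001000
	size      = (PFN_PHYS(0x0c2f4) + 0xfff) - base
	          = 0x0c2f4fff - 0x0c001000             = 0x002f3fff (~3 MiB)

So the reservation covers everything from just above the zero-page gap
through the end of the kernel image. Note that PFN_PHYS(start_pfn) is
already page-aligned, so the additional PAGE_SIZE - 1 pads the reserved
size by almost a full extra page.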
@@ -342,10 +239,6 @@ void __init __weak plat_early_device_setup(void)
 {
 }
 
-void __init __weak plat_mem_setup(void)
-{
-}
-
 void __init setup_arch(char **cmdline_p)
 {
 	enable_mmu();
@@ -401,44 +294,16 @@ void __init setup_arch(char **cmdline_p)
 
 	plat_early_device_setup();
 
-	/* Let earlyprintk output early console messages */
-	early_platform_driver_probe("earlyprintk", 1, 1);
-
-	lmb_init();
-
 	sh_mv_setup();
-	sh_mv.mv_mem_init();
-
-	early_reserve_mem();
 
-	lmb_enforce_memory_limit(memory_limit);
-	lmb_analyze();
-
-	lmb_dump_all();
-
-	/*
-	 * Determine low and high memory ranges:
-	 */
-	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
-
-	nodes_clear(node_online_map);
-
-	memory_start = (unsigned long)__va(__MEMORY_START);
-	memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+	/* Let earlyprintk output early console messages */
+	early_platform_driver_probe("earlyprintk", 1, 1);
 
-	uncached_init();
-	pmb_init();
-	do_init_bootmem();
-	plat_mem_setup();
-	sparse_init();
+	paging_init();
 
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
-	paging_init();
-
-	ioremap_fixed_init();
 
 	/* Perform the machine specific initialisation */
 	if (likely(sh_mv.mv_setup))
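
For orientation: after this patch, setup_arch() no longer drives the memory
bring-up itself; the lmb_init()/mv_mem_init()/early_reserve_mem() sequence,
the PFN range computation, and do_init_bootmem() all disappear behind
paging_init(). Because the diffstat is limited to arch/sh/kernel/setup.c,
their new home is not visible here. The sketch below simply regroups the
deleted sequence as one hypothetical helper (the wrapper name is invented;
every call in the body appears among the lines removed above) to show what
the LMB-backed path now has to cover:

/* Hypothetical regrouping of the sequence deleted from setup_arch(). */
static void __init consolidated_mem_init_sketch(void)
{
	lmb_init();			/* empty LMB region tables        */
	sh_mv.mv_mem_init();		/* platform adds RAM via LMB      */
	early_reserve_mem();		/* kernel, initrd, crashkernel    */

	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();
	lmb_dump_all();

	/* Determine low and high memory ranges. */
	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();		/* bitmap + active regions        */
	plat_mem_setup();
	sparse_init();
}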