diff options
Diffstat (limited to 'arch/x86/kernel/setup.c')
-rw-r--r-- | arch/x86/kernel/setup.c | 52 |
1 file changed, 8 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9d43b28e0728..4be9b398470e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -294,30 +294,11 @@ static void __init init_gbpages(void)
294 | else | 294 | else |
295 | direct_gbpages = 0; | 295 | direct_gbpages = 0; |
296 | } | 296 | } |
297 | |||
298 | static void __init cleanup_highmap_brk_end(void) | ||
299 | { | ||
300 | pud_t *pud; | ||
301 | pmd_t *pmd; | ||
302 | |||
303 | mmu_cr4_features = read_cr4(); | ||
304 | |||
305 | /* | ||
306 | * _brk_end cannot change anymore, but it and _end may be | ||
307 | * located on different 2M pages. cleanup_highmap(), however, | ||
308 | * can only consider _end when it runs, so destroy any | ||
309 | * mappings beyond _brk_end here. | ||
310 | */ | ||
311 | pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); | ||
312 | pmd = pmd_offset(pud, _brk_end - 1); | ||
313 | while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) | ||
314 | pmd_clear(pmd); | ||
315 | } | ||
316 | #else | 297 | #else |
317 | static inline void init_gbpages(void) | 298 | static inline void init_gbpages(void) |
318 | { | 299 | { |
319 | } | 300 | } |
320 | static inline void cleanup_highmap_brk_end(void) | 301 | static void __init cleanup_highmap(void) |
321 | { | 302 | { |
322 | } | 303 | } |
323 | #endif | 304 | #endif |
@@ -330,8 +311,6 @@ static void __init reserve_brk(void)
330 | /* Mark brk area as locked down and no longer taking any | 311 | /* Mark brk area as locked down and no longer taking any |
331 | new allocations */ | 312 | new allocations */ |
332 | _brk_start = 0; | 313 | _brk_start = 0; |
333 | |||
334 | cleanup_highmap_brk_end(); | ||
335 | } | 314 | } |
336 | 315 | ||
337 | #ifdef CONFIG_BLK_DEV_INITRD | 316 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -640,28 +619,6 @@ void __init reserve_standard_io_resources(void)
640 | 619 | ||
641 | } | 620 | } |
642 | 621 | ||
643 | /* | ||
644 | * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by | ||
645 | * is_kdump_kernel() to determine if we are booting after a panic. Hence | ||
646 | * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE. | ||
647 | */ | ||
648 | |||
649 | #ifdef CONFIG_CRASH_DUMP | ||
650 | /* elfcorehdr= specifies the location of elf core header | ||
651 | * stored by the crashed kernel. This option will be passed | ||
652 | * by kexec loader to the capture kernel. | ||
653 | */ | ||
654 | static int __init setup_elfcorehdr(char *arg) | ||
655 | { | ||
656 | char *end; | ||
657 | if (!arg) | ||
658 | return -EINVAL; | ||
659 | elfcorehdr_addr = memparse(arg, &end); | ||
660 | return end > arg ? 0 : -EINVAL; | ||
661 | } | ||
662 | early_param("elfcorehdr", setup_elfcorehdr); | ||
663 | #endif | ||
664 | |||
665 | static __init void reserve_ibft_region(void) | 622 | static __init void reserve_ibft_region(void) |
666 | { | 623 | { |
667 | unsigned long addr, size = 0; | 624 | unsigned long addr, size = 0; |
@@ -950,6 +907,8 @@ void __init setup_arch(char **cmdline_p)
950 | */ | 907 | */ |
951 | reserve_brk(); | 908 | reserve_brk(); |
952 | 909 | ||
910 | cleanup_highmap(); | ||
911 | |||
953 | memblock.current_limit = get_max_mapped(); | 912 | memblock.current_limit = get_max_mapped(); |
954 | memblock_x86_fill(); | 913 | memblock_x86_fill(); |
955 | 914 | ||
@@ -1017,6 +976,11 @@ void __init setup_arch(char **cmdline_p)
1017 | paging_init(); | 976 | paging_init(); |
1018 | x86_init.paging.pagetable_setup_done(swapper_pg_dir); | 977 | x86_init.paging.pagetable_setup_done(swapper_pg_dir); |
1019 | 978 | ||
979 | if (boot_cpu_data.cpuid_level >= 0) { | ||
980 | /* A CPU has %cr4 if and only if it has CPUID */ | ||
981 | mmu_cr4_features = read_cr4(); | ||
982 | } | ||
983 | |||
1020 | #ifdef CONFIG_X86_32 | 984 | #ifdef CONFIG_X86_32 |
1021 | /* sync back kernel address range */ | 985 | /* sync back kernel address range */ |
1022 | clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, | 986 | clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, |