author      Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
committer   Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
commit      c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree        ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/kernel/setup.c
parent      ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent      6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
        litmus/sched_cedf.c

Diffstat (limited to 'arch/x86/kernel/setup.c')
-rw-r--r--   arch/x86/kernel/setup.c   327
1 file changed, 152 insertions(+), 175 deletions(-)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c3a4fbb2b996..afaf38447ef5 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -31,6 +31,7 @@
 #include <linux/apm_bios.h>
 #include <linux/initrd.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/seq_file.h>
 #include <linux/console.h>
 #include <linux/mca.h>
@@ -83,7 +84,6 @@
 #include <asm/dmi.h>
 #include <asm/io_apic.h>
 #include <asm/ist.h>
-#include <asm/vmi.h>
 #include <asm/setup_arch.h>
 #include <asm/bios_ebda.h>
 #include <asm/cacheflush.h>
@@ -107,11 +107,13 @@
 #include <asm/percpu.h>
 #include <asm/topology.h>
 #include <asm/apicdef.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #ifdef CONFIG_X86_64
 #include <asm/numa_64.h>
 #endif
 #include <asm/mce.h>
+#include <asm/alternative.h>
+#include <asm/prom.h>
 
 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -125,7 +127,6 @@ unsigned long max_pfn_mapped;
 RESERVE_BRK(dmi_alloc, 65536);
 #endif
 
-unsigned int boot_cpu_id __read_mostly;
 
 static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
 unsigned long _brk_end = (unsigned long)__brk_base;
@@ -297,12 +298,15 @@ static void __init init_gbpages(void)
 static inline void init_gbpages(void)
 {
 }
+static void __init cleanup_highmap(void)
+{
+}
 #endif
 
 static void __init reserve_brk(void)
 {
        if (_brk_end > _brk_start)
-               reserve_early(__pa(_brk_start), __pa(_brk_end), "BRK");
+               memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK");
 
        /* Mark brk area as locked down and no longer taking any
           new allocations */
@@ -324,17 +328,16 @@ static void __init relocate_initrd(void)
        char *p, *q;
 
        /* We need to move the initrd down into lowmem */
-       ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
+       ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size,
                        PAGE_SIZE);
 
-       if (ramdisk_here == -1ULL)
+       if (ramdisk_here == MEMBLOCK_ERROR)
               panic("Cannot find place for new RAMDISK of size %lld\n",
                       ramdisk_size);
 
        /* Note: this includes all the lowmem currently occupied by
           the initrd, we rely on that fact to keep the data intact. */
-       reserve_early(ramdisk_here, ramdisk_here + area_size,
-                       "NEW RAMDISK");
+       memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK");
        initrd_start = ramdisk_here + PAGE_OFFSET;
        initrd_end = initrd_start + ramdisk_size;
        printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
@@ -390,7 +393,7 @@ static void __init reserve_initrd(void)
        initrd_start = 0;
 
        if (ramdisk_size >= (end_of_lowmem>>1)) {
-              free_early(ramdisk_image, ramdisk_end);
+              memblock_x86_free_range(ramdisk_image, ramdisk_end);
               printk(KERN_ERR "initrd too large to handle, "
                      "disabling initrd\n");
               return;
@@ -413,7 +416,7 @@ static void __init reserve_initrd(void)
 
        relocate_initrd();
 
-       free_early(ramdisk_image, ramdisk_end);
+       memblock_x86_free_range(ramdisk_image, ramdisk_end);
 }
 #else
 static void __init reserve_initrd(void)
@@ -430,16 +433,30 @@ static void __init parse_setup_data(void)
               return;
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
-              data = early_memremap(pa_data, PAGE_SIZE);
+              u32 data_len, map_len;
+
+              map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
+                      (u64)sizeof(struct setup_data));
+              data = early_memremap(pa_data, map_len);
+              data_len = data->len + sizeof(struct setup_data);
+              if (data_len > map_len) {
+                     early_iounmap(data, map_len);
+                     data = early_memremap(pa_data, data_len);
+                     map_len = data_len;
+              }
+
               switch (data->type) {
               case SETUP_E820_EXT:
-                     parse_e820_ext(data, pa_data);
+                     parse_e820_ext(data);
+                     break;
+              case SETUP_DTB:
+                     add_dtb(pa_data);
                      break;
               default:
                      break;
               }
               pa_data = data->next;
-              early_iounmap(data, PAGE_SIZE);
+              early_iounmap(data, map_len);
        }
 }
 
@@ -469,7 +486,7 @@ static void __init e820_reserve_setup_data(void)
        e820_print_map("reserve setup_data");
 }
 
-static void __init reserve_early_setup_data(void)
+static void __init memblock_x86_reserve_range_setup_data(void)
 {
        struct setup_data *data;
        u64 pa_data;
@@ -481,7 +498,7 @@ static void __init reserve_early_setup_data(void)
        while (pa_data) {
               data = early_memremap(pa_data, sizeof(*data));
               sprintf(buf, "setup data %x", data->type);
-              reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
+              memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf);
               pa_data = data->next;
               early_iounmap(data, sizeof(*data));
        }
@@ -502,6 +519,18 @@ static inline unsigned long long get_total_mem(void)
        return total << PAGE_SHIFT;
 }
 
+/*
+ * Keep the crash kernel below this limit. On 32 bits earlier kernels
+ * would limit the kernel to the low 512 MiB due to mapping restrictions.
+ * On 64 bits, kexec-tools currently limits us to 896 MiB; increase this
+ * limit once kexec-tools are fixed.
+ */
+#ifdef CONFIG_X86_32
+# define CRASH_KERNEL_ADDR_MAX (512 << 20)
+#else
+# define CRASH_KERNEL_ADDR_MAX (896 << 20)
+#endif
+
 static void __init reserve_crashkernel(void)
 {
        unsigned long long total_mem;
@@ -519,23 +548,27 @@ static void __init reserve_crashkernel(void)
        if (crash_base <= 0) {
               const unsigned long long alignment = 16<<20;   /* 16M */
 
-              crash_base = find_e820_area(alignment, ULONG_MAX, crash_size,
-                             alignment);
-              if (crash_base == -1ULL) {
+              /*
+               * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
+               */
+              crash_base = memblock_find_in_range(alignment,
+                             CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
+
+              if (crash_base == MEMBLOCK_ERROR) {
                      pr_info("crashkernel reservation failed - No suitable area found.\n");
                      return;
               }
        } else {
               unsigned long long start;
 
-              start = find_e820_area(crash_base, ULONG_MAX, crash_size,
-                             1<<20);
+              start = memblock_find_in_range(crash_base,
+                             crash_base + crash_size, crash_size, 1<<20);
               if (start != crash_base) {
                      pr_info("crashkernel reservation failed - memory is in use.\n");
                      return;
               }
        }
-       reserve_early(crash_base, crash_base + crash_size, "CRASH KERNEL");
+       memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL");
 
        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                      "for crashkernel (System RAM: %ldMB)\n",
@@ -586,28 +619,6 @@ void __init reserve_standard_io_resources(void)
 
 }
 
-/*
- * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
- * is_kdump_kernel() to determine if we are booting after a panic. Hence
- * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
- */
-
-#ifdef CONFIG_CRASH_DUMP
-/* elfcorehdr= specifies the location of elf core header
- * stored by the crashed kernel. This option will be passed
- * by kexec loader to the capture kernel.
- */
-static int __init setup_elfcorehdr(char *arg)
-{
-       char *end;
-       if (!arg)
-              return -EINVAL;
-       elfcorehdr_addr = memparse(arg, &end);
-       return end > arg ? 0 : -EINVAL;
-}
-early_param("elfcorehdr", setup_elfcorehdr);
-#endif
-
 static __init void reserve_ibft_region(void)
 {
        unsigned long addr, size = 0;
@@ -615,82 +626,10 @@ static __init void reserve_ibft_region(void)
        addr = find_ibft_region(&size);
 
        if (size)
-              reserve_early_overlap_ok(addr, addr + size, "ibft");
+              memblock_x86_reserve_range(addr, addr + size, "* ibft");
 }
 
-#ifdef CONFIG_X86_RESERVE_LOW_64K
-static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
-{
-       printk(KERN_NOTICE
-              "%s detected: BIOS may corrupt low RAM, working around it.\n",
-              d->ident);
-
-       e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
-       sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-
-       return 0;
-}
-#endif
-
-/* List of systems that have known low memory corruption BIOS problems */
-static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
-#ifdef CONFIG_X86_RESERVE_LOW_64K
-       {
-              .callback = dmi_low_memory_corruption,
-              .ident = "AMI BIOS",
-              .matches = {
-                     DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
-              },
-       },
-       {
-              .callback = dmi_low_memory_corruption,
-              .ident = "Phoenix BIOS",
-              .matches = {
-                     DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
-              },
-       },
-       {
-              .callback = dmi_low_memory_corruption,
-              .ident = "Phoenix/MSC BIOS",
-              .matches = {
-                     DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"),
-              },
-       },
-       /*
-        * AMI BIOS with low memory corruption was found on Intel DG45ID and
-        * DG45FC boards.
-        * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
-        * match only DMI_BOARD_NAME and see if there is more bad products
-        * with this vendor.
-        */
-       {
-              .callback = dmi_low_memory_corruption,
-              .ident = "AMI BIOS",
-              .matches = {
-                     DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
-              },
-       },
-       {
-              .callback = dmi_low_memory_corruption,
-              .ident = "AMI BIOS",
-              .matches = {
-                     DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
-              },
-       },
-       /*
-        * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
-        * match on the product name.
-        */
-       {
-              .callback = dmi_low_memory_corruption,
-              .ident = "Phoenix BIOS",
-              .matches = {
-                     DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
-              },
-       },
-#endif
-       {}
-};
+static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
 
 static void __init trim_bios_range(void)
 {
@@ -698,8 +637,14 @@ static void __init trim_bios_range(void)
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
+        *
+        * This typically reserves additional memory (64KiB by default)
+        * since some BIOSes are known to corrupt low memory. See the
+        * Kconfig help text for X86_RESERVE_LOW.
         */
-       e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);
+       e820_update_range(0, ALIGN(reserve_low, PAGE_SIZE),
+                         E820_RAM, E820_RESERVED);
+
        /*
         * special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
@@ -709,6 +654,28 @@ static void __init trim_bios_range(void)
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 }
 
+static int __init parse_reservelow(char *p)
+{
+       unsigned long long size;
+
+       if (!p)
+              return -EINVAL;
+
+       size = memparse(p, &p);
+
+       if (size < 4096)
+              size = 4096;
+
+       if (size > 640*1024)
+              size = 640*1024;
+
+       reserve_low = size;
+
+       return 0;
+}
+
+early_param("reservelow", parse_reservelow);
+
 /*
  * Determine if we were loaded by an EFI loader. If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -724,20 +691,28 @@ static void __init trim_bios_range(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-       int acpi = 0;
-       int k8 = 0;
-
 #ifdef CONFIG_X86_32
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
        visws_early_detect();
+
+       /*
+        * copy kernel address range established so far and switch
+        * to the proper swapper page table
+        */
+       clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       initial_page_table + KERNEL_PGD_BOUNDARY,
+                       KERNEL_PGD_PTRS);
+
+       load_cr3(swapper_pg_dir);
+       __flush_tlb_all();
 #else
        printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
 
-       /* VMI may relocate the fixmap; do this before touching ioremap area */
-       vmi_init();
-
-       /* OFW also may relocate the fixmap */
+       /*
+        * If we have OLPC OFW, we might end up relocating the fixmap due to
+        * reserve_top(), so do this before touching the ioremap area.
+        */
        olpc_ofw_detect();
 
        early_trap_init();
@@ -782,12 +757,13 @@ void __init setup_arch(char **cmdline_p)
 #endif
         4)) {
               efi_enabled = 1;
-              efi_reserve_early();
+              efi_memblock_x86_reserve_range();
        }
 #endif
 
        x86_init.oem.arch_setup();
 
+       iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
        setup_memory_map();
        parse_setup_data();
        /* update the e820_saved too */
@@ -838,11 +814,8 @@ void __init setup_arch(char **cmdline_p)
 
        x86_report_nx();
 
-       /* Must be before kernel pagetables are setup */
-       vmi_activate();
-
        /* after early param, so could get panic from serial */
-       reserve_early_setup_data();
+       memblock_x86_reserve_range_setup_data();
 
        if (acpi_mps_check()) {
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -863,8 +836,6 @@ void __init setup_arch(char **cmdline_p)
 
        dmi_scan_machine();
 
-       dmi_check_system(bad_bios_dmi_table);
-
        /*
        * VMware detection requires dmi to be available, so this
        * needs to be done after dmi_scan_machine, for the BP.
@@ -897,8 +868,6 @@ void __init setup_arch(char **cmdline_p)
        */
        max_pfn = e820_end_of_ram_pfn();
 
-       /* preallocate 4k for mptable mpc */
-       early_reserve_e820_mpc_new();
        /* update e820 for memory not covered by WB MTRRs */
        mtrr_bp_init();
        if (mtrr_trim_uncached_memory(max_pfn))
@@ -920,18 +889,8 @@ void __init setup_arch(char **cmdline_p)
               max_low_pfn = max_pfn;
 
        high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
-       max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
 #endif
 
-#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
-       setup_bios_corruption_check();
-#endif
-
-       printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
-                     max_pfn_mapped<<PAGE_SHIFT);
-
-       reserve_brk();
-
        /*
        * Find and reserve possible boot-time SMP configuration:
        */
@@ -939,15 +898,37 @@ void __init setup_arch(char **cmdline_p)
 
        reserve_ibft_region();
 
-       reserve_trampoline_memory();
+       /*
+       * Need to conclude brk, before memblock_x86_fill()
+       * it could use memblock_find_in_range, could overlap with
+       * brk area.
+       */
+       reserve_brk();
+
+       cleanup_highmap();
+
+       memblock.current_limit = get_max_mapped();
+       memblock_x86_fill();
 
-#ifdef CONFIG_ACPI_SLEEP
        /*
-       * Reserve low memory region for sleep support.
-       * even before init_memory_mapping
+       * The EFI specification says that boot service code won't be called
+       * after ExitBootServices(). This is, in fact, a lie.
        */
-       acpi_reserve_wakeup_memory();
+       if (efi_enabled)
+              efi_reserve_boot_services();
+
+       /* preallocate 4k for mptable mpc */
+       early_reserve_e820_mpc_new();
+
+#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
+       setup_bios_corruption_check();
 #endif
+
+       printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
+                     max_pfn_mapped<<PAGE_SHIFT);
+
+       setup_trampolines();
+
        init_gbpages();
 
        /* max_pfn_mapped is updated here */
@@ -962,6 +943,7 @@ void __init setup_arch(char **cmdline_p)
               max_low_pfn = max_pfn;
        }
 #endif
+       memblock.current_limit = get_max_mapped();
 
        /*
        * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -971,6 +953,8 @@ void __init setup_arch(char **cmdline_p)
        if (init_ohci1394_dma_early)
               init_ohci1394_dma_on_all_controllers();
 #endif
+       /* Allocate bigger log buffer */
+       setup_log_buf(1);
 
        reserve_initrd();
 
@@ -987,24 +971,8 @@ void __init setup_arch(char **cmdline_p)
 
        early_acpi_boot_init();
 
-#ifdef CONFIG_ACPI_NUMA
-       /*
-       * Parse SRAT to discover nodes.
-       */
-       acpi = acpi_numa_init();
-#endif
-
-#ifdef CONFIG_K8_NUMA
-       if (!acpi)
-              k8 = !k8_numa_init(0, max_pfn);
-#endif
-
-       initmem_init(0, max_pfn, acpi, k8);
-#ifndef CONFIG_NO_BOOTMEM
-       early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
-#endif
-
-       dma32_reserve_bootmem();
+       initmem_init();
+       memblock_find_dma_reserve();
 
 #ifdef CONFIG_KVM_CLOCK
        kvmclock_init();
@@ -1014,7 +982,17 @@ void __init setup_arch(char **cmdline_p)
        paging_init();
        x86_init.paging.pagetable_setup_done(swapper_pg_dir);
 
-       setup_trampoline_page_table();
+       if (boot_cpu_data.cpuid_level >= 0) {
+              /* A CPU has %cr4 if and only if it has CPUID */
+              mmu_cr4_features = read_cr4();
+       }
+
+#ifdef CONFIG_X86_32
+       /* sync back kernel address range */
+       clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       KERNEL_PGD_PTRS);
+#endif
 
        tboot_probe();
 
@@ -1030,8 +1008,8 @@ void __init setup_arch(char **cmdline_p)
        * Read APIC and some other early information from ACPI tables.
        */
        acpi_boot_init();
-
        sfi_init();
+       x86_dtb_init();
 
        /*
        * get boot-time SMP configuration:
@@ -1041,15 +1019,10 @@ void __init setup_arch(char **cmdline_p)
 
        prefill_possible_map();
 
-#ifdef CONFIG_X86_64
        init_cpu_to_node();
-#endif
 
        init_apic_mappings();
-       ioapic_init_mappings();
-
-       /* need to wait for io_apic is mapped */
-       probe_nr_irqs_gsi();
+       ioapic_and_gsi_init();
 
        kvm_guest_init();
 
@@ -1070,7 +1043,11 @@ void __init setup_arch(char **cmdline_p)
 #endif
        x86_init.oem.banner();
 
+       x86_init.timers.wallclock_init();
+
        mcheck_init();
+
+       arch_init_ideal_nops();
 }
 
 #ifdef CONFIG_X86_32