author    Ingo Molnar <mingo@elte.hu>  2008-07-18 13:31:12 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-07-18 13:31:12 -0400
commit    3e370b29d35fb01bfb92c2814d6f79bf6a2cb970 (patch)
tree      3b8fb467d60bfe6a34686f4abdc3a60050ba40a4 /arch/x86/kernel/setup.c
parent    88d1dce3a74367291f65a757fbdcaf17f042f30c (diff)
parent    5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into x86/pci-ioapic-boot-irq-quirks
Conflicts:
	drivers/pci/quirks.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/setup.c')
-rw-r--r--  arch/x86/kernel/setup.c | 73
1 file changed, 56 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cfcfbefee0b9..531b55b8e81a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -394,11 +394,10 @@ static void __init parse_setup_data(void)
 	}
 }
 
-static void __init reserve_setup_data(void)
+static void __init e820_reserve_setup_data(void)
 {
 	struct setup_data *data;
 	u64 pa_data;
-	char buf[32];
 	int found = 0;
 
 	if (boot_params.hdr.version < 0x0209)
@@ -406,8 +405,6 @@ static void __init reserve_setup_data(void)
 	pa_data = boot_params.hdr.setup_data;
 	while (pa_data) {
 		data = early_ioremap(pa_data, sizeof(*data));
-		sprintf(buf, "setup data %x", data->type);
-		reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
 		e820_update_range(pa_data, sizeof(*data)+data->len,
 				E820_RAM, E820_RESERVED_KERN);
 		found = 1;
@@ -418,10 +415,29 @@ static void __init reserve_setup_data(void)
 		return;
 
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+	memcpy(&e820_saved, &e820, sizeof(struct e820map));
 	printk(KERN_INFO "extended physical RAM map:\n");
 	e820_print_map("reserve setup_data");
 }
 
+static void __init reserve_early_setup_data(void)
+{
+	struct setup_data *data;
+	u64 pa_data;
+	char buf[32];
+
+	if (boot_params.hdr.version < 0x0209)
+		return;
+	pa_data = boot_params.hdr.setup_data;
+	while (pa_data) {
+		data = early_ioremap(pa_data, sizeof(*data));
+		sprintf(buf, "setup data %x", data->type);
+		reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
+		pa_data = data->next;
+		early_iounmap(data, sizeof(*data));
+	}
+}
+
 /*
  * --------- Crashkernel reservation ------------------------------
  */
@@ -580,6 +596,7 @@ void __init setup_arch(char **cmdline_p)
 {
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
+	visws_early_detect();
 	pre_setup_arch_hook();
 	early_cpu_init();
 #else
@@ -626,6 +643,8 @@ void __init setup_arch(char **cmdline_p)
 
 	setup_memory_map();
 	parse_setup_data();
+	/* update the e820_saved too */
+	e820_reserve_setup_data();
 
 	copy_edd();
 
@@ -656,7 +675,7 @@ void __init setup_arch(char **cmdline_p)
 	parse_early_param();
 
 	/* after early param, so could get panic from serial */
-	reserve_setup_data();
+	reserve_early_setup_data();
 
 	if (acpi_mps_check()) {
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -665,6 +684,11 @@ void __init setup_arch(char **cmdline_p)
 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
 	}
 
+#ifdef CONFIG_PCI
+	if (pci_early_dump_regs)
+		early_dump_pci_devices();
+#endif
+
 	finish_e820_parsing();
 
 #ifdef CONFIG_X86_32
@@ -691,22 +715,18 @@ void __init setup_arch(char **cmdline_p)
 	early_gart_iommu_check();
 #endif
 
-	e820_register_active_regions(0, 0, -1UL);
 	/*
 	 * partially used pages are not usable - thus
 	 * we are rounding upwards:
 	 */
-	max_pfn = e820_end_of_ram();
+	max_pfn = e820_end_of_ram_pfn();
 
 	/* preallocate 4k for mptable mpc */
 	early_reserve_e820_mpc_new();
 	/* update e820 for memory not covered by WB MTRRs */
 	mtrr_bp_init();
-	if (mtrr_trim_uncached_memory(max_pfn)) {
-		remove_all_active_ranges();
-		e820_register_active_regions(0, 0, -1UL);
-		max_pfn = e820_end_of_ram();
-	}
+	if (mtrr_trim_uncached_memory(max_pfn))
+		max_pfn = e820_end_of_ram_pfn();
 
 #ifdef CONFIG_X86_32
 	/* max_low_pfn get updated here */
@@ -718,12 +738,26 @@ void __init setup_arch(char **cmdline_p)
 
 	/* How many end-of-memory variables you have, grandma! */
 	/* need this before calling reserve_initrd */
-	max_low_pfn = max_pfn;
+	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
+		max_low_pfn = e820_end_of_low_ram_pfn();
+	else
+		max_low_pfn = max_pfn;
+
 	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
 #endif
 
 	/* max_pfn_mapped is updated here */
-	max_pfn_mapped = init_memory_mapping(0, (max_low_pfn << PAGE_SHIFT));
+	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
+	max_pfn_mapped = max_low_pfn_mapped;
+
+#ifdef CONFIG_X86_64
+	if (max_pfn > max_low_pfn) {
+		max_pfn_mapped = init_memory_mapping(1UL<<32,
+				max_pfn<<PAGE_SHIFT);
+		/* can we preseve max_low_pfn ?*/
+		max_low_pfn = max_pfn;
+	}
+#endif
 
 	/*
 	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -749,9 +783,6 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	acpi_boot_table_init();
 
-	/* Remove active ranges so rediscovery with NUMA-awareness happens */
-	remove_all_active_ranges();
-
 #ifdef CONFIG_ACPI_NUMA
 	/*
 	 * Parse SRAT to discover nodes.
@@ -823,6 +854,14 @@ void __init setup_arch(char **cmdline_p)
 	init_cpu_to_node();
 #endif
 
+#ifdef CONFIG_X86_NUMAQ
+	/*
+	 * need to check online nodes num, call it
+	 * here before time_init/tsc_init
+	 */
+	numaq_tsc_disable();
+#endif
+
 	init_apic_mappings();
 	ioapic_init_mappings();
 