author     Ingo Molnar <mingo@elte.hu>  2008-09-05 12:56:57 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-09-05 12:56:57 -0400
commit     616ad8c44281c0c6711a72b560e01ec335ff27e0 (patch)
tree       0a20453ffedb09db6fb41a0c2208ccc2c7751d3a /arch/x86/kernel
parent     99809963c99e1ed868d9ebeb4a5e7ee1cbe0309f (diff)
parent     b380b0d4f7dffcc235c0facefa537d4655619101 (diff)
Merge branch 'linus' into x86/defconfig
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/boot.c                 |  16
-rw-r--r--  arch/x86/kernel/acpi/sleep.c                |   4
-rw-r--r--  arch/x86/kernel/amd_iommu.c                 |  21
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c            |  24
-rw-r--r--  arch/x86/kernel/apic_32.c                   |  22
-rw-r--r--  arch/x86/kernel/apic_64.c                   |   7
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c  |  17
-rw-r--r--  arch/x86/kernel/cpu/bugs.c                  |   6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c   | 109
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h   |   3
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c                 |  18
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c         |   5
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c     |  18
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c          |  20
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c             |   5
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c      |   8
-rw-r--r--  arch/x86/kernel/cpuid.c                     |  15
-rw-r--r--  arch/x86/kernel/efi_32.c                    |   4
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c            |  10
-rw-r--r--  arch/x86/kernel/head64.c                    |   1
-rw-r--r--  arch/x86/kernel/hpet.c                      |  24
-rw-r--r--  arch/x86/kernel/io_apic_32.c                |   6
-rw-r--r--  arch/x86/kernel/io_apic_64.c                |  25
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c          |  20
-rw-r--r--  arch/x86/kernel/mfgpt_32.c                  |  52
-rw-r--r--  arch/x86/kernel/mmconf-fam10h_64.c          |   2
-rw-r--r--  arch/x86/kernel/mpparse.c                   |  17
-rw-r--r--  arch/x86/kernel/msr.c                       |  40
-rw-r--r--  arch/x86/kernel/nmi.c                       |  28
-rw-r--r--  arch/x86/kernel/numaq_32.c                  |   2
-rw-r--r--  arch/x86/kernel/paravirt.c                  |   2
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c            |  16
-rw-r--r--  arch/x86/kernel/process_32.c                |   5
-rw-r--r--  arch/x86/kernel/process_64.c                |   5
-rw-r--r--  arch/x86/kernel/relocate_kernel_32.S        |  10
-rw-r--r--  arch/x86/kernel/setup.c                     |  24
-rw-r--r--  arch/x86/kernel/signal_64.c                 |  11
-rw-r--r--  arch/x86/kernel/smpboot.c                   |  68
-rw-r--r--  arch/x86/kernel/smpcommon.c                 |  17
-rw-r--r--  arch/x86/kernel/tlb_uv.c                    |   3
-rw-r--r--  arch/x86/kernel/traps_64.c                  |   9
-rw-r--r--  arch/x86/kernel/tsc.c                       | 242
-rw-r--r--  arch/x86/kernel/tsc_sync.c                  |   6
-rw-r--r--  arch/x86/kernel/visws_quirks.c              |   6
-rw-r--r--  arch/x86/kernel/vmi_32.c                    |   3
-rw-r--r--  arch/x86/kernel/vmlinux_32.lds.S            |   8
46 files changed, 636 insertions(+), 348 deletions(-)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index fa88a1d71290..bfd10fd211cd 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -97,6 +97,8 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #warning ACPI uses CMPXCHG, i486 and later hardware
 #endif
 
+static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
+
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
    -------------------------------------------------------------------------- */
@@ -158,6 +160,14 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
 struct acpi_mcfg_allocation *pci_mmcfg_config;
 int pci_mmcfg_config_num;
 
+static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
+{
+	if (!strcmp(mcfg->header.oem_id, "SGI"))
+		acpi_mcfg_64bit_base_addr = TRUE;
+
+	return 0;
+}
+
 int __init acpi_parse_mcfg(struct acpi_table_header *header)
 {
 	struct acpi_table_mcfg *mcfg;
@@ -190,8 +200,12 @@ int __init acpi_parse_mcfg(struct acpi_table_header *header)
 	}
 
 	memcpy(pci_mmcfg_config, &mcfg[1], config_size);
+
+	acpi_mcfg_oem_check(mcfg);
+
 	for (i = 0; i < pci_mmcfg_config_num; ++i) {
-		if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
+		if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
+		    !acpi_mcfg_64bit_base_addr) {
 			printk(KERN_ERR PREFIX
 			       "MMCONFIG not in low 4GB of memory\n");
 			kfree(pci_mmcfg_config);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index fa2161d5003b..426e5d91b63a 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -20,7 +20,7 @@ unsigned long acpi_realmode_flags;
 /* address in low memory of the wakeup routine. */
 static unsigned long acpi_realmode;
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
 static char temp_stack[10240];
 #endif
 
@@ -86,7 +86,7 @@ int acpi_save_state_mem(void)
 #endif /* !CONFIG_64BIT */
 
 	header->pmode_cr0 = read_cr0();
-	header->pmode_cr4 = read_cr4();
+	header->pmode_cr4 = read_cr4_safe();
 	header->realmode_flags = acpi_realmode_flags;
 	header->real_magic = 0x12345678;
 
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 22d7d050905d..69b4d060b21c 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -65,7 +65,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	u8 *target;
 
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-	target = (iommu->cmd_buf + tail);
+	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));
 	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
 	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
@@ -101,16 +101,13 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
  */
 static int iommu_completion_wait(struct amd_iommu *iommu)
 {
-	int ret;
+	int ret, ready = 0;
+	unsigned status = 0;
 	struct iommu_cmd cmd;
-	volatile u64 ready = 0;
-	unsigned long ready_phys = virt_to_phys(&ready);
 	unsigned long i = 0;
 
 	memset(&cmd, 0, sizeof(cmd));
-	cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK;
-	cmd.data[1] = upper_32_bits(ready_phys);
-	cmd.data[2] = 1; /* value written to 'ready' */
+	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
 	iommu->need_sync = 0;
@@ -122,9 +119,15 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
 	while (!ready && (i < EXIT_LOOP_COUNT)) {
 		++i;
-		cpu_relax();
+		/* wait for the bit to become one */
+		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
 	}
 
+	/* set bit back to zero */
+	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
+	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
 	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
 		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
 
@@ -161,7 +164,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 	address &= PAGE_MASK;
 	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
 	cmd.data[1] |= domid;
-	cmd.data[2] = LOW_U32(address);
+	cmd.data[2] = lower_32_bits(address);
 	cmd.data[3] = upper_32_bits(address);
 	if (s) /* size bit - we flush more than one 4kb page */
 		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index d9a9da597e79..a69cc0f52042 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -801,6 +801,21 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 }
 
 /*
+ * Init the device table to not allow DMA access for devices and
+ * suppress all page faults
+ */
+static void init_device_table(void)
+{
+	u16 devid;
+
+	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
+		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+		set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT);
+	}
+}
+
+/*
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
  */
@@ -931,6 +946,9 @@ int __init amd_iommu_init(void)
 	if (amd_iommu_pd_alloc_bitmap == NULL)
 		goto free;
 
+	/* init the device table */
+	init_device_table();
+
 	/*
 	 * let all alias entries point to itself
 	 */
@@ -954,15 +972,15 @@ int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
-	ret = amd_iommu_init_dma_ops();
+	ret = sysdev_class_register(&amd_iommu_sysdev_class);
 	if (ret)
 		goto free;
 
-	ret = sysdev_class_register(&amd_iommu_sysdev_class);
+	ret = sysdev_register(&device_amd_iommu);
 	if (ret)
 		goto free;
 
-	ret = sysdev_register(&device_amd_iommu);
+	ret = amd_iommu_init_dma_ops();
 	if (ret)
 		goto free;
 
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index d6c898358371..f88bd0d982b0 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -1454,8 +1454,6 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 	}
 }
 
-unsigned int __cpuinitdata maxcpus = NR_CPUS;
-
 void __cpuinit generic_processor_info(int apicid, int version)
 {
 	int cpu;
@@ -1482,12 +1480,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
 		return;
 	}
 
-	if (num_processors >= maxcpus) {
-		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-		       " Processor ignored.\n", maxcpus);
-		return;
-	}
-
 	num_processors++;
 	cpus_complement(tmp_map, cpu_present_map);
 	cpu = first_cpu(tmp_map);
@@ -1720,15 +1712,19 @@ static int __init parse_lapic_timer_c2_ok(char *arg)
 }
 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
 
-static int __init apic_set_verbosity(char *str)
+static int __init apic_set_verbosity(char *arg)
 {
-	if (strcmp("debug", str) == 0)
+	if (!arg)
+		return -EINVAL;
+
+	if (strcmp(arg, "debug") == 0)
 		apic_verbosity = APIC_DEBUG;
-	else if (strcmp("verbose", str) == 0)
+	else if (strcmp(arg, "verbose") == 0)
 		apic_verbosity = APIC_VERBOSE;
-	return 1;
+
+	return 0;
 }
-__setup("apic=", apic_set_verbosity);
+early_param("apic", apic_set_verbosity);
 
 static int __init lapic_insert_resource(void)
 {
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 7f1f030da7ee..446c062e831c 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -90,7 +90,6 @@ static unsigned long apic_phys;
 
 unsigned long mp_lapic_addr;
 
-unsigned int __cpuinitdata maxcpus = NR_CPUS;
 /*
  * Get the LAPIC version
  */
@@ -1062,12 +1061,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
 		return;
 	}
 
-	if (num_processors >= maxcpus) {
-		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-		       " Processor ignored.\n", maxcpus);
-		return;
-	}
-
 	num_processors++;
 	cpus_complement(tmp_map, cpu_present_map);
 	cpu = first_cpu(tmp_map);
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 84a8220a6072..a6ef672adbba 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -56,9 +56,22 @@ void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
 
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
-		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+		/*
+		 * There is a known erratum on Pentium III and Core Solo
+		 * and Core Duo CPUs.
+		 * " Page with PAT set to WC while associated MTRR is UC
+		 *   may consolidate to UC "
+		 * Because of this erratum, it is better to stick with
+		 * setting WC in MTRR rather than using PAT on these CPUs.
+		 *
+		 * Enable PAT WC only on P4, Core 2 or later CPUs.
+		 */
+		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
 			return;
-		break;
+
+		pat_disable("PAT WC disabled due to known CPU erratum.");
+		return;
+
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_CENTAUR:
 	case X86_VENDOR_TRANSMETA:
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c9b58a806e85..c8e315f1aa83 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -50,6 +50,8 @@ static double __initdata y = 3145727.0;
  */
 static void __init check_fpu(void)
 {
+	s32 fdiv_bug;
+
 	if (!boot_cpu_data.hard_math) {
 #ifndef CONFIG_MATH_EMULATION
 		printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
@@ -74,8 +76,10 @@ static void __init check_fpu(void)
74 "fistpl %0\n\t" 76 "fistpl %0\n\t"
75 "fwait\n\t" 77 "fwait\n\t"
76 "fninit" 78 "fninit"
77 : "=m" (*&boot_cpu_data.fdiv_bug) 79 : "=m" (*&fdiv_bug)
78 : "m" (*&x), "m" (*&y)); 80 : "m" (*&x), "m" (*&y));
81
82 boot_cpu_data.fdiv_bug = fdiv_bug;
79 if (boot_cpu_data.fdiv_bug) 83 if (boot_cpu_data.fdiv_bug)
80 printk("Hmm, FPU with FDIV bug.\n"); 84 printk("Hmm, FPU with FDIV bug.\n");
81} 85}
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 4e7271999a74..84bb395038d8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -737,63 +737,44 @@ static int find_psb_table(struct powernow_k8_data *data)
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
 {
-	if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE))
+	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
 		return;
 
-	data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK;
-	data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK;
-	data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
-	data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
-	data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK);
-	data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK;
+	data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
+	data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
+	data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+	data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
+	data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
+	data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
 }
-
-
-static struct acpi_processor_performance *acpi_perf_data;
-static int preregister_valid;
-
-static int powernow_k8_cpu_preinit_acpi(void)
-{
-	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
-	if (!acpi_perf_data)
-		return -ENODEV;
-
-	if (acpi_processor_preregister_performance(acpi_perf_data))
-		return -ENODEV;
-	else
-		preregister_valid = 1;
-	return 0;
-}
 
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
 	struct cpufreq_frequency_table *powernow_table;
 	int ret_val;
-	int cpu = 0;
 
-	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
-	if (acpi_processor_register_performance(data->acpi_data, data->cpu)) {
+	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
 		dprintk("register performance failed: bad ACPI data\n");
 		return -EIO;
 	}
 
 	/* verify the data contained in the ACPI structures */
-	if (data->acpi_data->state_count <= 1) {
+	if (data->acpi_data.state_count <= 1) {
 		dprintk("No ACPI P-States\n");
 		goto err_out;
 	}
 
-	if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
-	    (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+	if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+	    (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
 		dprintk("Invalid control/status registers (%x - %x)\n",
-			data->acpi_data->control_register.space_id,
-			data->acpi_data->status_register.space_id);
+			data->acpi_data.control_register.space_id,
+			data->acpi_data.status_register.space_id);
 		goto err_out;
 	}
 
 	/* fill in data->powernow_table */
 	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
-		* (data->acpi_data->state_count + 1)), GFP_KERNEL);
+		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
 	if (!powernow_table) {
 		dprintk("powernow_table memory alloc failure\n");
 		goto err_out;
@@ -806,12 +787,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	if (ret_val)
 		goto err_out_mem;
 
-	powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END;
-	powernow_table[data->acpi_data->state_count].index = 0;
+	powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
+	powernow_table[data->acpi_data.state_count].index = 0;
 	data->powernow_table = powernow_table;
 
 	/* fill in data */
-	data->numps = data->acpi_data->state_count;
+	data->numps = data->acpi_data.state_count;
 	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
 		print_basics(data);
 	powernow_k8_acpi_pst_values(data, 0);
@@ -819,31 +800,16 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
-	/* determine affinity, from ACPI if available */
-	if (preregister_valid) {
-		if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) ||
-		    (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY))
-			data->starting_core_affinity = data->acpi_data->shared_cpu_map;
-		else
-			data->starting_core_affinity = cpumask_of_cpu(data->cpu);
-	} else {
-		/* best guess from family if not */
-		if (cpu_family == CPU_HW_PSTATE)
-			data->starting_core_affinity = cpumask_of_cpu(data->cpu);
-		else
-			data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu);
-	}
-
 	return 0;
 
 err_out_mem:
 	kfree(powernow_table);
 
 err_out:
-	acpi_processor_unregister_performance(data->acpi_data, data->cpu);
+	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
 
 	/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
-	data->acpi_data->state_count = 0;
+	data->acpi_data.state_count = 0;
 
 	return -ENODEV;
 }
@@ -855,10 +821,10 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
 	rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
 	data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
 
-	for (i = 0; i < data->acpi_data->state_count; i++) {
+	for (i = 0; i < data->acpi_data.state_count; i++) {
 		u32 index;
 
-		index = data->acpi_data->states[i].control & HW_PSTATE_MASK;
+		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
 		if (index > data->max_hw_pstate) {
 			printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
 			printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
@@ -874,7 +840,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
 
 		powernow_table[i].index = index;
 
-		powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000;
+		powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
 	}
 	return 0;
 }
@@ -883,16 +849,16 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 {
 	int i;
 	int cntlofreq = 0;
-	for (i = 0; i < data->acpi_data->state_count; i++) {
+	for (i = 0; i < data->acpi_data.state_count; i++) {
 		u32 fid;
 		u32 vid;
 
 		if (data->exttype) {
-			fid = data->acpi_data->states[i].status & EXT_FID_MASK;
-			vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK;
+			fid = data->acpi_data.states[i].status & EXT_FID_MASK;
+			vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
 		} else {
-			fid = data->acpi_data->states[i].control & FID_MASK;
-			vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK;
+			fid = data->acpi_data.states[i].control & FID_MASK;
+			vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
 		}
 
 		dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
@@ -933,10 +899,10 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 			cntlofreq = i;
 		}
 
-		if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) {
+		if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
 			printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
 				powernow_table[i].frequency,
-				(unsigned int) (data->acpi_data->states[i].core_frequency * 1000));
+				(unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
 			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 			continue;
 		}
@@ -946,12 +912,11 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
-	if (data->acpi_data->state_count)
-		acpi_processor_unregister_performance(data->acpi_data, data->cpu);
+	if (data->acpi_data.state_count)
+		acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
 }
 
 #else
-static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; }
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
@@ -1136,7 +1101,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
-	cpumask_t oldmask = CPU_MASK_ALL;
+	cpumask_t oldmask;
 	int rc;
 
 	if (!cpu_online(pol->cpu))
@@ -1209,7 +1174,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	/* run on any CPU again */
 	set_cpus_allowed_ptr(current, &oldmask);
 
-	pol->cpus = data->starting_core_affinity;
+	if (cpu_family == CPU_HW_PSTATE)
+		pol->cpus = cpumask_of_cpu(pol->cpu);
+	else
+		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
 	data->available_cores = &(pol->cpus);
 
 	/* Take a crude guess here.
@@ -1332,7 +1300,6 @@ static int __cpuinit powernowk8_init(void)
 	}
 
 	if (supported_cpus == num_online_cpus()) {
-		powernow_k8_cpu_preinit_acpi();
 		printk(KERN_INFO PFX "Found %d %s "
 		       "processors (%d cpu cores) (" VERSION ")\n",
 		       num_online_nodes(),
@@ -1349,10 +1316,6 @@ static void __exit powernowk8_exit(void)
 	dprintk("exit\n");
 
 	cpufreq_unregister_driver(&cpufreq_amd64_driver);
-
-#ifdef CONFIG_X86_POWERNOW_K8_ACPI
-	free_percpu(acpi_perf_data);
-#endif
 }
 
 MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>");
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index a62612cd4be8..ab48cfed4d96 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -33,13 +33,12 @@ struct powernow_k8_data {
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 	/* the acpi table needs to be kept. it's only available if ACPI was
 	 * used to determine valid frequency/vid/fid states */
-	struct acpi_processor_performance *acpi_data;
+	struct acpi_processor_performance acpi_data;
 #endif
 	/* we need to keep track of associated cores, but let cpufreq
 	 * handle hotplug events - so just point at cpufreq pol->cpus
 	 * structure */
 	cpumask_t *available_cores;
-	cpumask_t starting_core_affinity;
 };
 
 
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 3fd7a67bb06a..e710a21bb6e8 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -134,23 +134,6 @@ static void __cpuinit set_cx86_memwb(void)
 	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
 }
 
-static void __cpuinit set_cx86_inc(void)
-{
-	unsigned char ccr3;
-
-	printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
-
-	ccr3 = getCx86(CX86_CCR3);
-	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
-	/* PCR1 -- Performance Control */
-	/* Incrementor on, whatever that is */
-	setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
-	/* PCR0 -- Performance Control */
-	/* Incrementor Margin 10 */
-	setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
-	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
-}
-
 /*
  * Configure later MediaGX and/or Geode processor.
  */
@@ -174,7 +157,6 @@ static void __cpuinit geode_configure(void)
 
 	set_cx86_memwb();
 	set_cx86_reorder();
-	set_cx86_inc();
 
 	local_irq_restore(flags);
 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 65a339678ece..726a5fcdf341 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -759,6 +759,7 @@ static struct sysdev_class mce_sysclass = {
 };
 
 DEFINE_PER_CPU(struct sys_device, device_mce);
+void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;
 
 /* Why are there no generic functions for this? */
 #define ACCESSOR(name, var, start) \
@@ -883,9 +884,13 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		mce_create_device(cpu);
+		if (threshold_cpu_callback)
+			threshold_cpu_callback(action, cpu);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
+		if (threshold_cpu_callback)
+			threshold_cpu_callback(action, cpu);
 		mce_remove_device(cpu);
 		break;
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 88736cadbaa6..5eb390a4b2e9 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -628,6 +628,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 	deallocate_threshold_block(cpu, bank);
 
 free_out:
+	kobject_del(b->kobj);
 	kobject_put(b->kobj);
 	kfree(b);
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
@@ -645,14 +646,11 @@ static void threshold_remove_device(unsigned int cpu)
 }
 
 /* get notified when a cpu comes on/off */
-static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action, void *hcpu)
+static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action,
+						    unsigned int cpu)
 {
-	/* cpu was unsigned int to begin with */
-	unsigned int cpu = (unsigned long)hcpu;
-
 	if (cpu >= NR_CPUS)
-		goto out;
+		return;
 
 	switch (action) {
 	case CPU_ONLINE:
@@ -666,14 +664,8 @@ static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
 	default:
 		break;
 	}
-out:
-	return NOTIFY_OK;
 }
 
-static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
-	.notifier_call = threshold_cpu_callback,
-};
-
 static __init int threshold_init_device(void)
 {
 	unsigned lcpu = 0;
@@ -684,7 +676,7 @@ static __init int threshold_init_device(void)
 		if (err)
 			return err;
 	}
-	register_hotcpu_notifier(&threshold_cpu_notifier);
+	threshold_cpu_callback = amd_64_threshold_cpu_callback;
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 509bd3d9eacd..cb7d3b6a80eb 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -379,6 +379,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 			    unsigned long *size, mtrr_type *type)
 {
 	unsigned int mask_lo, mask_hi, base_lo, base_hi;
+	unsigned int tmp, hi;
 
 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
 	if ((mask_lo & 0x800) == 0) {
@@ -392,8 +393,23 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
 
 	/* Work out the shifted address mask. */
-	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
-		  | mask_lo >> PAGE_SHIFT;
+	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
+	mask_lo = size_or_mask | tmp;
+	/* Expand tmp with high bits to all 1s*/
+	hi = fls(tmp);
+	if (hi > 0) {
+		tmp |= ~((1<<(hi - 1)) - 1);
+
+		if (tmp != mask_lo) {
+			static int once = 1;
+
+			if (once) {
+				printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
+				once = 0;
+			}
+			mask_lo = tmp;
+		}
+	}
 
 	/* This works correctly if size is a power of two, i.e. a
 	   contiguous range. */
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 6f23969c8faf..b117d7f8a564 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -1496,11 +1496,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
 	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
 	if (!highest_pfn) {
-		if (!kvm_para_available()) {
-			printk(KERN_WARNING
+		WARN(!kvm_para_available(), KERN_WARNING
 				"WARNING: strange, CPU MTRRs all blank?\n");
-			WARN_ON(1);
-		}
 		return 0;
 	}
 
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index de7439f82b92..05cc22dbd4ff 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -478,7 +478,13 @@ static int setup_p4_watchdog(unsigned nmi_hz)
 		perfctr_msr = MSR_P4_IQ_PERFCTR1;
 		evntsel_msr = MSR_P4_CRU_ESCR0;
 		cccr_msr = MSR_P4_IQ_CCCR1;
-		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
+
+		/* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */
+		if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4)
+			cccr_val = P4_CCCR_OVF_PMI0;
+		else
+			cccr_val = P4_CCCR_OVF_PMI1;
+		cccr_val |= P4_CCCR_ESCR_SELECT(4);
 	}
 
 	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 14b11b3be31c..8e9cd6a8ec12 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -89,6 +89,8 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	struct cpuid_regs cmd;
 	int cpu = iminor(file->f_path.dentry->d_inode);
 	u64 pos = *ppos;
+	ssize_t bytes = 0;
+	int err = 0;
 
 	if (count % 16)
 		return -EINVAL;	/* Invalid chunk size */
@@ -96,14 +98,19 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 	for (; count; count -= 16) {
 		cmd.eax = pos;
 		cmd.ecx = pos >> 32;
-		smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
-		if (copy_to_user(tmp, &cmd, 16))
-			return -EFAULT;
+		err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
+		if (err)
+			break;
+		if (copy_to_user(tmp, &cmd, 16)) {
+			err = -EFAULT;
+			break;
+		}
 		tmp += 16;
+		bytes += 16;
 		*ppos = ++pos;
 	}
 
-	return tmp - buf;
+	return bytes ? bytes : err;
 }
 
 static int cpuid_open(struct inode *inode, struct file *file)
diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
index 4b63c8e1f13b..5cab48ee61a4 100644
--- a/arch/x86/kernel/efi_32.c
+++ b/arch/x86/kernel/efi_32.c
@@ -53,7 +53,7 @@ void efi_call_phys_prelog(void)
 	 * directory. If I have PAE, I just need to duplicate one entry in
 	 * page directory.
 	 */
-	cr4 = read_cr4();
+	cr4 = read_cr4_safe();
 
 	if (cr4 & X86_CR4_PAE) {
 		efi_bak_pg_dir_pointer[0].pgd =
@@ -91,7 +91,7 @@ void efi_call_phys_epilog(void)
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
 
-	cr4 = read_cr4();
+	cr4 = read_cr4_safe();
 
 	if (cr4 & X86_CR4_PAE) {
 		swapper_pg_dir[pgd_index(0)].pgd =
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 2cfcbded888a..bfa837cb16be 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -222,7 +222,7 @@ static __init void map_low_mmrs(void)
 
 enum map_type {map_wb, map_uc};
 
-static void map_high(char *id, unsigned long base, int shift, enum map_type map_type)
+static __init void map_high(char *id, unsigned long base, int shift, enum map_type map_type)
 {
 	unsigned long bytes, paddr;
 
@@ -293,7 +293,9 @@ static __init void uv_rtc_init(void)
 	sn_rtc_cycles_per_second = ticks_per_sec;
 }
 
-static __init void uv_system_init(void)
+static bool uv_system_inited;
+
+void __init uv_system_init(void)
 {
 	union uvh_si_addr_map_config_u m_n_config;
 	union uvh_node_id_u node_id;
@@ -383,6 +385,7 @@ static __init void uv_system_init(void)
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
 	map_mmioh_high(max_pnode);
+	uv_system_inited = true;
 }
 
 /*
@@ -391,8 +394,7 @@ static __init void uv_system_init(void)
  */
 void __cpuinit uv_cpu_init(void)
 {
-	if (!uv_node_to_blade)
-		uv_system_init();
+	BUG_ON(!uv_system_inited);
 
 	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1b318e903bf6..9bfc4d72fb2e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -88,6 +88,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
 	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
 				(__START_KERNEL & PGDIR_MASK)));
+	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
 
 	/* clear bss before set_intr_gate with early_idt_handler */
 	clear_bss();
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index ad2b15a1334d..59fd3b6b1303 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -359,6 +359,7 @@ static int hpet_clocksource_register(void)
 int __init hpet_enable(void)
 {
 	unsigned long id;
+	int i;
 
 	if (!is_hpet_capable())
 		return 0;
@@ -369,6 +370,29 @@ int __init hpet_enable(void)
 	 * Read the period and check for a sane value:
 	 */
 	hpet_period = hpet_readl(HPET_PERIOD);
+
+	/*
+	 * AMD SB700 based systems with spread spectrum enabled use a
+	 * SMM based HPET emulation to provide proper frequency
+	 * setting. The SMM code is initialized with the first HPET
+	 * register access and takes some time to complete. During
+	 * this time the config register reads 0xffffffff. We check
+	 * for max. 1000 loops whether the config register reads a non
+	 * 0xffffffff value to make sure that HPET is up and running
+	 * before we go further. A counting loop is safe, as the HPET
+	 * access takes thousands of CPU cycles. On non SB700 based
+	 * machines this check is only done once and has no side
+	 * effects.
+	 */
+	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
+		if (i == 1000) {
+			printk(KERN_WARNING
+			       "HPET config register value = 0xFFFFFFFF. "
+			       "Disabling HPET\n");
+			goto out_nohpet;
+		}
+	}
+
 	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
 		goto out_nohpet;
 
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index de9aa0e3a9c5..09cddb57bec4 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -57,7 +57,7 @@ atomic_t irq_mis_count;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
 
 int timer_through_8259 __initdata;
 
@@ -1209,10 +1209,6 @@ static int assign_irq_vector(int irq)
 	return vector;
 }
 
-void setup_vector_irq(int cpu)
-{
-}
-
 static struct irq_chip ioapic_chip;
 
 #define IOAPIC_AUTO -1
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 8269434d1707..61a83b70c18f 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -101,7 +101,7 @@ int timer_through_8259 __initdata;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-DEFINE_SPINLOCK(vector_lock);
+static DEFINE_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers
@@ -697,6 +697,19 @@ static int pin_2_irq(int idx, int apic, int pin)
 	return irq;
 }
 
+void lock_vector_lock(void)
+{
+	/* Used to the online set of cpus does not change
+	 * during assign_irq_vector.
+	 */
+	spin_lock(&vector_lock);
+}
+
+void unlock_vector_lock(void)
+{
+	spin_unlock(&vector_lock);
+}
+
 static int __assign_irq_vector(int irq, cpumask_t mask)
 {
 	/*
@@ -802,7 +815,7 @@ static void __clear_irq_vector(int irq)
 	cpus_clear(cfg->domain);
 }
 
-static void __setup_vector_irq(int cpu)
+void __setup_vector_irq(int cpu)
 {
 	/* Initialize vector_irq on a new cpu */
 	/* This function must be called with vector_lock held */
@@ -825,14 +838,6 @@ static void __setup_vector_irq(int cpu)
 	}
 }
 
-void setup_vector_irq(int cpu)
-{
-	spin_lock(&vector_lock);
-	__setup_vector_irq(smp_processor_id());
-	spin_unlock(&vector_lock);
-}
-
-
 static struct irq_chip ioapic_chip;
 
 static void ioapic_register_intr(int irq, unsigned long trigger)
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 9fe478d98406..0732adba05ca 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/numa.h>
 #include <linux/ftrace.h>
+#include <linux/suspend.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -78,7 +79,7 @@ static void load_segments(void)
 /*
  * A architecture hook called to validate the
  * proposed image and prepare the control pages
- * as needed. The pages for KEXEC_CONTROL_CODE_SIZE
+ * as needed. The pages for KEXEC_CONTROL_PAGE_SIZE
  * have been allocated, but the segments have yet
  * been copied into the kernel.
  *
@@ -113,6 +114,7 @@ void machine_kexec(struct kimage *image)
 {
 	unsigned long page_list[PAGES_NR];
 	void *control_page;
+	int save_ftrace_enabled;
 	asmlinkage unsigned long
 		(*relocate_kernel_ptr)(unsigned long indirection_page,
 					unsigned long control_page,
@@ -120,7 +122,12 @@ void machine_kexec(struct kimage *image)
 					unsigned int has_pae,
 					unsigned int preserve_context);
 
-	tracer_disable();
+#ifdef CONFIG_KEXEC_JUMP
+	if (kexec_image->preserve_context)
+		save_processor_state();
+#endif
+
+	save_ftrace_enabled = __ftrace_enabled_save();
 
 	/* Interrupts aren't acceptable while we reboot */
 	local_irq_disable();
@@ -138,7 +145,7 @@ void machine_kexec(struct kimage *image)
 	}
 
 	control_page = page_address(image->control_code_page);
-	memcpy(control_page, relocate_kernel, PAGE_SIZE/2);
+	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
 
 	relocate_kernel_ptr = control_page;
 	page_list[PA_CONTROL_PAGE] = __pa(control_page);
@@ -178,6 +185,13 @@ void machine_kexec(struct kimage *image)
 					(unsigned long)page_list,
 					image->start, cpu_has_pae,
 					image->preserve_context);
+
+#ifdef CONFIG_KEXEC_JUMP
+	if (kexec_image->preserve_context)
+		restore_processor_state();
+#endif
+
+	__ftrace_enabled_restore(save_ftrace_enabled);
 }
 
 void arch_crash_save_vmcoreinfo(void)
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 07c0f828f488..3b599518c322 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -33,6 +33,8 @@
 #include <linux/module.h>
 #include <asm/geode.h>
 
+#define MFGPT_DEFAULT_IRQ	7
+
 static struct mfgpt_timer_t {
 	unsigned int avail:1;
 } mfgpt_timers[MFGPT_MAX_TIMERS];
@@ -157,29 +159,48 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
 }
 EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event);
 
-int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable)
+int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable)
 {
-	u32 val, dummy;
-	int offset;
+	u32 zsel, lpc, dummy;
+	int shift;
 
 	if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
 		return -EIO;
 
-	if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
+	/*
+	 * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
+	 * is using the same CMP of the timer's Siamese twin, the IRQ is set to
+	 * 2, and we mustn't use nor change it.
+	 * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the
+	 * IRQ of the 1st. This can only happen if forcing an IRQ, calling this
+	 * with *irq==0 is safe. Currently there _are_ no 2 drivers.
+	 */
+	rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
+	shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4;
+	if (((zsel >> shift) & 0xF) == 2)
 		return -EIO;
 
-	rdmsr(MSR_PIC_ZSEL_LOW, val, dummy);
+	/* Choose IRQ: if none supplied, keep IRQ already set or use default */
+	if (!*irq)
+		*irq = (zsel >> shift) & 0xF;
+	if (!*irq)
+		*irq = MFGPT_DEFAULT_IRQ;
 
-	offset = (timer % 4) * 4;
-
-	val &= ~((0xF << offset) | (0xF << (offset + 16)));
+	/* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
+	if (*irq < 1 || *irq == 2 || *irq > 15)
+		return -EIO;
+	rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
+	if (lpc & (1 << *irq))
+		return -EIO;
 
+	/* All chosen and checked - go for it */
+	if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
+		return -EIO;
 	if (enable) {
-		val |= (irq & 0x0F) << (offset);
-		val |= (irq & 0x0F) << (offset + 16);
+		zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
+		wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
 	}
 
-	wrmsr(MSR_PIC_ZSEL_LOW, val, dummy);
 	return 0;
 }
 
@@ -242,7 +263,7 @@ EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);
 static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
 static u16 mfgpt_event_clock;
 
-static int irq = 7;
+static int irq;
 static int __init mfgpt_setup(char *str)
 {
 	get_option(&str, &irq);
@@ -346,7 +367,7 @@ int __init mfgpt_timer_setup(void)
 	mfgpt_event_clock = timer;
 
 	/* Set up the IRQ on the MFGPT side */
-	if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, irq)) {
+	if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) {
 		printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq);
 		return -EIO;
 	}
@@ -374,13 +395,14 @@ int __init mfgpt_timer_setup(void)
 			       &mfgpt_clockevent);
 
 	printk(KERN_INFO
-	       "mfgpt-timer: registering the MFGPT timer as a clock event.\n");
+	       "mfgpt-timer: Registering MFGPT timer %d as a clock event, using IRQ %d\n",
+	       timer, irq);
 	clockevents_register_device(&mfgpt_clockevent);
 
 	return 0;
 
 err:
-	geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, irq);
+	geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq);
 	printk(KERN_ERR
 	       "mfgpt-timer: Unable to set up the MFGPT clock source\n");
 	return -EIO;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index fdfdc550b366..efc2f361fe85 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata mmconf_dmi_table[] = {
 	{}
 };
 
-void __init check_enable_amd_mmconf_dmi(void)
+void __cpuinit check_enable_amd_mmconf_dmi(void)
 {
 	dmi_check_system(mmconf_dmi_table);
 }
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 6ae005ccaed8..b3fb430725cb 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -49,7 +49,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
 	return sum & 0xFF;
 }
 
-static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
+static void __init MP_processor_info(struct mpc_config_processor *m)
 {
 	int apicid;
 	char *bootup_cpu = "";
@@ -83,7 +83,7 @@ static void __init MP_bus_info(struct mpc_config_bus *m)
83 if (x86_quirks->mpc_oem_bus_info) 83 if (x86_quirks->mpc_oem_bus_info)
84 x86_quirks->mpc_oem_bus_info(m, str); 84 x86_quirks->mpc_oem_bus_info(m, str);
85 else 85 else
86 printk(KERN_INFO "Bus #%d is %s\n", m->mpc_busid, str); 86 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str);
87 87
88#if MAX_MP_BUSSES < 256 88#if MAX_MP_BUSSES < 256
89 if (m->mpc_busid >= MAX_MP_BUSSES) { 89 if (m->mpc_busid >= MAX_MP_BUSSES) {
@@ -154,7 +154,7 @@ static void __init MP_ioapic_info(struct mpc_config_ioapic *m)
154 154
155static void print_MP_intsrc_info(struct mpc_config_intsrc *m) 155static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
156{ 156{
157 printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x," 157 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
158 " IRQ %02x, APIC ID %x, APIC INT %02x\n", 158 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
159 m->mpc_irqtype, m->mpc_irqflag & 3, 159 m->mpc_irqtype, m->mpc_irqflag & 3,
160 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, 160 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
@@ -163,7 +163,7 @@ static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
163 163
164static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) 164static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
165{ 165{
166 printk(KERN_CONT "Int: type %d, pol %d, trig %d, bus %02x," 166 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
167 " IRQ %02x, APIC ID %x, APIC INT %02x\n", 167 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
168 mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3, 168 mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
169 (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus, 169 (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
@@ -235,7 +235,7 @@ static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
235 235
236static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) 236static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m)
237{ 237{
238 printk(KERN_INFO "Lint: type %d, pol %d, trig %d, bus %02x," 238 apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
239 " IRQ %02x, APIC ID %x, APIC LINT %02x\n", 239 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
240 m->mpc_irqtype, m->mpc_irqflag & 3, 240 m->mpc_irqtype, m->mpc_irqflag & 3,
241 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, 241 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
@@ -484,7 +484,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
484} 484}
485 485
486 486
487static void construct_ioapic_table(int mpc_default_type) 487static void __init construct_ioapic_table(int mpc_default_type)
488{ 488{
489 struct mpc_config_ioapic ioapic; 489 struct mpc_config_ioapic ioapic;
490 struct mpc_config_bus bus; 490 struct mpc_config_bus bus;
@@ -529,7 +529,7 @@ static void construct_ioapic_table(int mpc_default_type)
529 construct_default_ioirq_mptable(mpc_default_type); 529 construct_default_ioirq_mptable(mpc_default_type);
530} 530}
531#else 531#else
532static inline void construct_ioapic_table(int mpc_default_type) { } 532static inline void __init construct_ioapic_table(int mpc_default_type) { }
533#endif 533#endif
534 534
535static inline void __init construct_default_ISA_mptable(int mpc_default_type) 535static inline void __init construct_default_ISA_mptable(int mpc_default_type)
@@ -695,7 +695,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
695 unsigned int *bp = phys_to_virt(base); 695 unsigned int *bp = phys_to_virt(base);
696 struct intel_mp_floating *mpf; 696 struct intel_mp_floating *mpf;
697 697
698 printk(KERN_DEBUG "Scan SMP from %p for %ld bytes.\n", bp, length); 698 apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
699 bp, length);
699 BUILD_BUG_ON(sizeof(*mpf) != 16); 700 BUILD_BUG_ON(sizeof(*mpf) != 16);
700 701
701 while (length > 0) { 702 while (length > 0) {
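
The mpparse.c hunks above demote a series of unconditional printk() calls to apic_printk(APIC_VERBOSE, ...), so the bus, interrupt-source, and SMP-scan chatter only shows up when the user raises APIC verbosity (e.g. with apic=verbose). A hedged userspace approximation of that gating idiom; v_printk and the level names are inventions for this sketch, not the kernel's:

    #include <stdarg.h>
    #include <stdio.h>

    enum { QUIET, VERBOSE, DEBUG };
    static int verbosity = QUIET;   /* the kernel sets this from a boot parameter */

    static void v_printk(int level, const char *fmt, ...)
    {
            va_list ap;

            if (level > verbosity)  /* suppressed unless verbosity was raised */
                    return;
            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            v_printk(VERBOSE, "Bus #%d is %s\n", 0, "ISA"); /* silent */
            verbosity = VERBOSE;
            v_printk(VERBOSE, "Bus #%d is %s\n", 0, "ISA"); /* printed */
            return 0;
    }
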
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 9fd809552447..2e2af5d18191 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -72,21 +72,28 @@ static ssize_t msr_read(struct file *file, char __user *buf,
72 u32 data[2]; 72 u32 data[2];
73 u32 reg = *ppos; 73 u32 reg = *ppos;
74 int cpu = iminor(file->f_path.dentry->d_inode); 74 int cpu = iminor(file->f_path.dentry->d_inode);
75 int err; 75 int err = 0;
76 ssize_t bytes = 0;
76 77
77 if (count % 8) 78 if (count % 8)
78 return -EINVAL; /* Invalid chunk size */ 79 return -EINVAL; /* Invalid chunk size */
79 80
80 for (; count; count -= 8) { 81 for (; count; count -= 8) {
81 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); 82 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
82 if (err) 83 if (err) {
83 return -EIO; 84 if (err == -EFAULT) /* Fix idiotic error code */
84 if (copy_to_user(tmp, &data, 8)) 85 err = -EIO;
85 return -EFAULT; 86 break;
87 }
88 if (copy_to_user(tmp, &data, 8)) {
89 err = -EFAULT;
90 break;
91 }
86 tmp += 2; 92 tmp += 2;
93 bytes += 8;
87 } 94 }
88 95
89 return ((char __user *)tmp) - buf; 96 return bytes ? bytes : err;
90} 97}
91 98
92static ssize_t msr_write(struct file *file, const char __user *buf, 99static ssize_t msr_write(struct file *file, const char __user *buf,
@@ -96,21 +103,28 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
96 u32 data[2]; 103 u32 data[2];
97 u32 reg = *ppos; 104 u32 reg = *ppos;
98 int cpu = iminor(file->f_path.dentry->d_inode); 105 int cpu = iminor(file->f_path.dentry->d_inode);
99 int err; 106 int err = 0;
107 ssize_t bytes = 0;
100 108
101 if (count % 8) 109 if (count % 8)
102 return -EINVAL; /* Invalid chunk size */ 110 return -EINVAL; /* Invalid chunk size */
103 111
104 for (; count; count -= 8) { 112 for (; count; count -= 8) {
105 if (copy_from_user(&data, tmp, 8)) 113 if (copy_from_user(&data, tmp, 8)) {
106 return -EFAULT; 114 err = -EFAULT;
115 break;
116 }
107 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); 117 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
108 if (err) 118 if (err) {
109 return -EIO; 119 if (err == -EFAULT) /* Fix idiotic error code */
120 err = -EIO;
121 break;
122 }
110 tmp += 2; 123 tmp += 2;
124 bytes += 8;
111 } 125 }
112 126
113 return ((char __user *)tmp) - buf; 127 return bytes ? bytes : err;
114} 128}
115 129
116static int msr_open(struct inode *inode, struct file *file) 130static int msr_open(struct inode *inode, struct file *file)
@@ -131,7 +145,7 @@ static int msr_open(struct inode *inode, struct file *file)
131 ret = -EIO; /* MSR not supported */ 145 ret = -EIO; /* MSR not supported */
132out: 146out:
133 unlock_kernel(); 147 unlock_kernel();
134 return 0; 148 return ret;
135} 149}
136 150
137/* 151/*
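
With the msr.c changes above, /dev/cpu/N/msr follows ordinary file semantics: a multi-register transfer that fails partway returns the bytes already moved rather than an error, -EIO/-EFAULT are only returned when nothing was transferred, and msr_open() finally propagates its failure instead of always returning 0. A small userspace sketch of the interface (needs root and the msr driver loaded); the MSR number is the file offset, and 0x10, the architectural time-stamp counter, is used here purely as an example:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t val;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/cpu/0/msr");
                    return 1;
            }
            /* Reads must be 8-byte multiples; the offset selects the MSR. */
            if (pread(fd, &val, sizeof(val), 0x10) != sizeof(val)) {
                    perror("rdmsr");
                    close(fd);
                    return 1;
            }
            printf("MSR 0x10 = 0x%016llx\n", (unsigned long long)val);
            close(fd);
            return 0;
    }
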
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index ac6d51222e7d..abb78a2cc4ad 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -114,6 +114,23 @@ static __init void nmi_cpu_busy(void *data)
114} 114}
115#endif 115#endif
116 116
117static void report_broken_nmi(int cpu, int *prev_nmi_count)
118{
119 printk(KERN_CONT "\n");
120
121 printk(KERN_WARNING
122 "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
123 cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
124
125 printk(KERN_WARNING
126 "Please report this to bugzilla.kernel.org,\n");
127 printk(KERN_WARNING
128 "and attach the output of the 'dmesg' command.\n");
129
130 per_cpu(wd_enabled, cpu) = 0;
131 atomic_dec(&nmi_active);
132}
133
117int __init check_nmi_watchdog(void) 134int __init check_nmi_watchdog(void)
118{ 135{
119 unsigned int *prev_nmi_count; 136 unsigned int *prev_nmi_count;
@@ -141,15 +158,8 @@ int __init check_nmi_watchdog(void)
141 for_each_online_cpu(cpu) { 158 for_each_online_cpu(cpu) {
142 if (!per_cpu(wd_enabled, cpu)) 159 if (!per_cpu(wd_enabled, cpu))
143 continue; 160 continue;
144 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { 161 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
145 printk(KERN_WARNING "WARNING: CPU#%d: NMI " 162 report_broken_nmi(cpu, prev_nmi_count);
146 "appears to be stuck (%d->%d)!\n",
147 cpu,
148 prev_nmi_count[cpu],
149 get_nmi_count(cpu));
150 per_cpu(wd_enabled, cpu) = 0;
151 atomic_dec(&nmi_active);
152 }
153 } 163 }
154 endflag = 1; 164 endflag = 1;
155 if (!atomic_read(&nmi_active)) { 165 if (!atomic_read(&nmi_active)) {
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index b8c45610b20a..eecc8c18f010 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -73,7 +73,7 @@ static void __init smp_dump_qct(void)
73} 73}
74 74
75 75
76void __init numaq_tsc_disable(void) 76void __cpuinit numaq_tsc_disable(void)
77{ 77{
78 if (!found_numaq) 78 if (!found_numaq)
79 return; 79 return;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 94da4d52d798..300da17e61cb 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -471,7 +471,7 @@ struct pv_lock_ops pv_lock_ops = {
471 .spin_unlock = __ticket_spin_unlock, 471 .spin_unlock = __ticket_spin_unlock,
472#endif 472#endif
473}; 473};
474EXPORT_SYMBOL_GPL(pv_lock_ops); 474EXPORT_SYMBOL(pv_lock_ops);
475 475
476EXPORT_SYMBOL_GPL(pv_time_ops); 476EXPORT_SYMBOL_GPL(pv_time_ops);
477EXPORT_SYMBOL (pv_cpu_ops); 477EXPORT_SYMBOL (pv_cpu_ops);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index b67a4b1d4eae..dcdac6c826e9 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -343,9 +343,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
343 /* were we called with bad_dma_address? */ 343 /* were we called with bad_dma_address? */
344 badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE); 344 badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
345 if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) { 345 if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
346 printk(KERN_ERR "Calgary: driver tried unmapping bad DMA " 346 WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
347 "address 0x%Lx\n", dma_addr); 347 "address 0x%Lx\n", dma_addr);
348 WARN_ON(1);
349 return; 348 return;
350 } 349 }
351 350
@@ -1269,13 +1268,15 @@ static inline int __init determine_tce_table_size(u64 ram)
1269static int __init build_detail_arrays(void) 1268static int __init build_detail_arrays(void)
1270{ 1269{
1271 unsigned long ptr; 1270 unsigned long ptr;
1272 int i, scal_detail_size, rio_detail_size; 1271 unsigned numnodes, i;
1272 int scal_detail_size, rio_detail_size;
1273 1273
1274 if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){ 1274 numnodes = rio_table_hdr->num_scal_dev;
1275 if (numnodes > MAX_NUMNODES){
1275 printk(KERN_WARNING 1276 printk(KERN_WARNING
1276 "Calgary: MAX_NUMNODES too low! Defined as %d, " 1277 "Calgary: MAX_NUMNODES too low! Defined as %d, "
1277 "but system has %d nodes.\n", 1278 "but system has %d nodes.\n",
1278 MAX_NUMNODES, rio_table_hdr->num_scal_dev); 1279 MAX_NUMNODES, numnodes);
1279 return -ENODEV; 1280 return -ENODEV;
1280 } 1281 }
1281 1282
@@ -1296,8 +1297,7 @@ static int __init build_detail_arrays(void)
1296 } 1297 }
1297 1298
1298 ptr = ((unsigned long)rio_table_hdr) + 3; 1299 ptr = ((unsigned long)rio_table_hdr) + 3;
1299 for (i = 0; i < rio_table_hdr->num_scal_dev; 1300 for (i = 0; i < numnodes; i++, ptr += scal_detail_size)
1300 i++, ptr += scal_detail_size)
1301 scal_devs[i] = (struct scal_detail *)ptr; 1301 scal_devs[i] = (struct scal_detail *)ptr;
1302 1302
1303 for (i = 0; i < rio_table_hdr->num_rio_dev; 1303 for (i = 0; i < rio_table_hdr->num_rio_dev;
@@ -1350,7 +1350,7 @@ static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
1350 * Function for kdump case. Get the tce tables from first kernel 1350 * Function for kdump case. Get the tce tables from first kernel
1351 * by reading the contents of the base address register of calgary iommu 1351 * by reading the contents of the base address register of calgary iommu
1352 */ 1352 */
1353static void get_tce_space_from_tar() 1353static void __init get_tce_space_from_tar(void)
1354{ 1354{
1355 int bus; 1355 int bus;
1356 void __iomem *target; 1356 void __iomem *target;
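
The iommu_free() hunk above (and the tsc_sync.c hunk further down) folds a printk() followed by WARN_ON(1) into a single WARN(condition, fmt, ...), which emits the message and the stack dump as one unit and returns whether the condition fired. A toy stand-in for that shape, assuming GCC statement expressions; the kernel's real WARN() lives in the bug-handling headers and does considerably more:

    #include <stdio.h>

    /* Toy WARN(): print the message (and, in the kernel, a backtrace)
     * when cond is true; evaluate to the condition's truth value. */
    #define WARN(cond, ...) ({                                      \
            int __c = !!(cond);                                     \
            if (__c) {                                              \
                    fprintf(stderr, __VA_ARGS__);                   \
                    fprintf(stderr, "(backtrace would follow)\n");  \
            }                                                       \
            __c;                                                    \
    })

    int main(void)
    {
            unsigned long dma_addr = 0x1000;

            /* One call replaces the old printk() + WARN_ON(1) pair. */
            if (WARN(dma_addr < 0x2000,
                     "driver tried unmapping bad DMA address 0x%lx\n",
                     dma_addr))
                    return 1;
            return 0;
    }
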
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 53bc653ed5ca..3b7a1ddcc0bc 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -95,7 +95,6 @@ static inline void play_dead(void)
95{ 95{
96 /* This must be done before dead CPU ack */ 96 /* This must be done before dead CPU ack */
97 cpu_exit_clear(); 97 cpu_exit_clear();
98 wbinvd();
99 mb(); 98 mb();
100 /* Ack it */ 99 /* Ack it */
101 __get_cpu_var(cpu_state) = CPU_DEAD; 100 __get_cpu_var(cpu_state) = CPU_DEAD;
@@ -104,8 +103,8 @@ static inline void play_dead(void)
104 * With physical CPU hotplug, we should halt the cpu 103 * With physical CPU hotplug, we should halt the cpu
105 */ 104 */
106 local_irq_disable(); 105 local_irq_disable();
107 while (1) 106 /* mask all interrupts, flush any and all caches, and halt */
108 halt(); 107 wbinvd_halt();
109} 108}
110#else 109#else
111static inline void play_dead(void) 110static inline void play_dead(void)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3fb62a7d9a16..71553b664e2a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -93,14 +93,13 @@ DECLARE_PER_CPU(int, cpu_state);
93static inline void play_dead(void) 93static inline void play_dead(void)
94{ 94{
95 idle_task_exit(); 95 idle_task_exit();
96 wbinvd();
97 mb(); 96 mb();
98 /* Ack it */ 97 /* Ack it */
99 __get_cpu_var(cpu_state) = CPU_DEAD; 98 __get_cpu_var(cpu_state) = CPU_DEAD;
100 99
101 local_irq_disable(); 100 local_irq_disable();
102 while (1) 101 /* mask all interrupts, flush any and all caches, and halt */
103 halt(); 102 wbinvd_halt();
104} 103}
105#else 104#else
106static inline void play_dead(void) 105static inline void play_dead(void)
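
Both play_dead() hunks above replace the separate wbinvd() and halt() loop with one combined wbinvd_halt() primitive, and the new comment states why: masking interrupts, flushing the caches, and halting must form a single uninterruptible sequence, otherwise the cache can be dirtied again after the flush while the CPU is being offlined. A hedged sketch of what such a primitive looks like; this mirrors the idea, not necessarily the kernel's exact implementation, and the instructions involved are privileged:

    /* Disable interrupts, write back and invalidate the caches, then halt
     * forever, with nothing able to run between the flush and the halt. */
    static inline void wbinvd_halt_sketch(void)
    {
            __asm__ volatile("cli; wbinvd; 1: hlt; jmp 1b" ::: "memory");
    }
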
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index 703310a99023..6f50664b2ba5 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -20,10 +20,11 @@
20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
21#define PAE_PGD_ATTR (_PAGE_PRESENT) 21#define PAE_PGD_ATTR (_PAGE_PRESENT)
22 22
23/* control_page + PAGE_SIZE/2 ~ control_page + PAGE_SIZE * 3/4 are 23/* control_page + KEXEC_CONTROL_CODE_MAX_SIZE
24 * used to save some data for jumping back 24 * ~ control_page + PAGE_SIZE are used as data storage and stack for
25 * jumping back
25 */ 26 */
26#define DATA(offset) (PAGE_SIZE/2+(offset)) 27#define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))
27 28
28/* Minimal CPU state */ 29/* Minimal CPU state */
29#define ESP DATA(0x0) 30#define ESP DATA(0x0)
@@ -376,3 +377,6 @@ swap_pages:
376 popl %ebx 377 popl %ebx
377 popl %ebp 378 popl %ebp
378 ret 379 ret
380
381 .globl kexec_control_code_size
382.set kexec_control_code_size, . - relocate_kernel
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2d888586385d..362d4e7f2d38 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -445,7 +445,7 @@ static void __init reserve_early_setup_data(void)
445 * @size: Size of the crashkernel memory to reserve. 445 * @size: Size of the crashkernel memory to reserve.
446 * Returns the base address on success, and -1ULL on failure. 446 * Returns the base address on success, and -1ULL on failure.
447 */ 447 */
448unsigned long long find_and_reserve_crashkernel(unsigned long long size) 448unsigned long long __init find_and_reserve_crashkernel(unsigned long long size)
449{ 449{
450 const unsigned long long alignment = 16<<20; /* 16M */ 450 const unsigned long long alignment = 16<<20; /* 16M */
451 unsigned long long start = 0LL; 451 unsigned long long start = 0LL;
@@ -670,6 +670,14 @@ void __init setup_arch(char **cmdline_p)
670 670
671 parse_early_param(); 671 parse_early_param();
672 672
673#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
674 /*
675 * Must be before kernel pagetables are setup
676 * or fixmap area is touched.
677 */
678 vmi_init();
679#endif
680
673 /* after early param, so could get panic from serial */ 681 /* after early param, so could get panic from serial */
674 reserve_early_setup_data(); 682 reserve_early_setup_data();
675 683
@@ -817,14 +825,6 @@ void __init setup_arch(char **cmdline_p)
817 kvmclock_init(); 825 kvmclock_init();
818#endif 826#endif
819 827
820#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
821 /*
822 * Must be after max_low_pfn is determined, and before kernel
823 * pagetables are setup.
824 */
825 vmi_init();
826#endif
827
828 paravirt_pagetable_setup_start(swapper_pg_dir); 828 paravirt_pagetable_setup_start(swapper_pg_dir);
829 paging_init(); 829 paging_init();
830 paravirt_pagetable_setup_done(swapper_pg_dir); 830 paravirt_pagetable_setup_done(swapper_pg_dir);
@@ -861,12 +861,6 @@ void __init setup_arch(char **cmdline_p)
861 init_apic_mappings(); 861 init_apic_mappings();
862 ioapic_init_mappings(); 862 ioapic_init_mappings();
863 863
864#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
865 if (def_to_bigsmp)
866 printk(KERN_WARNING "More than 8 CPUs detected and "
867 "CONFIG_X86_PC cannot handle it.\nUse "
868 "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
869#endif
870 kvm_guest_init(); 864 kvm_guest_init();
871 865
872 e820_reserve_resources(); 866 e820_reserve_resources();
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index b45ef8ddd651..ca316b5b742c 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -104,7 +104,16 @@ static inline int restore_i387(struct _fpstate __user *buf)
104 clts(); 104 clts();
105 task_thread_info(current)->status |= TS_USEDFPU; 105 task_thread_info(current)->status |= TS_USEDFPU;
106 } 106 }
107 return restore_fpu_checking((__force struct i387_fxsave_struct *)buf); 107 err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
108 if (unlikely(err)) {
109 /*
110 * Encountered an error while doing the restore from the
111 * user buffer, clear the fpu state.
112 */
113 clear_fpu(tsk);
114 clear_used_math();
115 }
116 return err;
108} 117}
109 118
110/* 119/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 332512767f4f..7985c5b3f916 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -326,12 +326,16 @@ static void __cpuinit start_secondary(void *unused)
326 * for which cpus receive the IPI. Holding this 326 * for which cpus receive the IPI. Holding this
327 * lock helps us to not include this cpu in a currently in progress 327 * lock helps us to not include this cpu in a currently in progress
328 * smp_call_function(). 328 * smp_call_function().
329 *
330 * We need to hold vector_lock so that the set of online cpus
331 * does not change while we are assigning vectors to cpus. Holding
332 * this lock ensures we don't half assign or remove an irq from a cpu.
329 */ 333 */
330 ipi_call_lock_irq(); 334 ipi_call_lock_irq();
331#ifdef CONFIG_X86_IO_APIC 335 lock_vector_lock();
332 setup_vector_irq(smp_processor_id()); 336 __setup_vector_irq(smp_processor_id());
333#endif
334 cpu_set(smp_processor_id(), cpu_online_map); 337 cpu_set(smp_processor_id(), cpu_online_map);
338 unlock_vector_lock();
335 ipi_call_unlock_irq(); 339 ipi_call_unlock_irq();
336 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 340 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
337 341
@@ -752,6 +756,14 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
752} 756}
753 757
754#ifdef CONFIG_X86_64 758#ifdef CONFIG_X86_64
759
760/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
761static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
762{
763 if (!after_bootmem)
764 free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
765}
766
755/* 767/*
756 * Allocate node local memory for the AP pda. 768 * Allocate node local memory for the AP pda.
757 * 769 *
@@ -780,8 +792,7 @@ int __cpuinit get_local_pda(int cpu)
780 792
781 if (oldpda) { 793 if (oldpda) {
782 memcpy(newpda, oldpda, size); 794 memcpy(newpda, oldpda, size);
783 if (!after_bootmem) 795 free_bootmem_pda(oldpda);
784 free_bootmem((unsigned long)oldpda, size);
785 } 796 }
786 797
787 newpda->in_bootmem = 0; 798 newpda->in_bootmem = 0;
@@ -1044,6 +1055,34 @@ static __init void disable_smp(void)
1044static int __init smp_sanity_check(unsigned max_cpus) 1055static int __init smp_sanity_check(unsigned max_cpus)
1045{ 1056{
1046 preempt_disable(); 1057 preempt_disable();
1058
1059#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
1060 if (def_to_bigsmp && nr_cpu_ids > 8) {
1061 unsigned int cpu;
1062 unsigned nr;
1063
1064 printk(KERN_WARNING
1065 "More than 8 CPUs detected - skipping them.\n"
1066 "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
1067
1068 nr = 0;
1069 for_each_present_cpu(cpu) {
1070 if (nr >= 8)
1071 cpu_clear(cpu, cpu_present_map);
1072 nr++;
1073 }
1074
1075 nr = 0;
1076 for_each_possible_cpu(cpu) {
1077 if (nr >= 8)
1078 cpu_clear(cpu, cpu_possible_map);
1079 nr++;
1080 }
1081
1082 nr_cpu_ids = 8;
1083 }
1084#endif
1085
1047 if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { 1086 if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
1048 printk(KERN_WARNING "weird, boot CPU (#%d) not listed " 1087 printk(KERN_WARNING "weird, boot CPU (#%d) not listed "
1049 "by the BIOS.\n", hard_smp_processor_id()); 1088 "by the BIOS.\n", hard_smp_processor_id());
@@ -1182,6 +1221,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1182 printk(KERN_INFO "CPU%d: ", 0); 1221 printk(KERN_INFO "CPU%d: ", 0);
1183 print_cpu_info(&cpu_data(0)); 1222 print_cpu_info(&cpu_data(0));
1184 setup_boot_clock(); 1223 setup_boot_clock();
1224
1225 if (is_uv_system())
1226 uv_system_init();
1185out: 1227out:
1186 preempt_enable(); 1228 preempt_enable();
1187} 1229}
@@ -1336,7 +1378,9 @@ int __cpu_disable(void)
1336 remove_siblinginfo(cpu); 1378 remove_siblinginfo(cpu);
1337 1379
1338 /* It's now safe to remove this processor from the online map */ 1380 /* It's now safe to remove this processor from the online map */
1381 lock_vector_lock();
1339 remove_cpu_from_maps(cpu); 1382 remove_cpu_from_maps(cpu);
1383 unlock_vector_lock();
1340 fixup_irqs(cpu_online_map); 1384 fixup_irqs(cpu_online_map);
1341 return 0; 1385 return 0;
1342} 1386}
@@ -1370,17 +1414,3 @@ void __cpu_die(unsigned int cpu)
1370 BUG(); 1414 BUG();
1371} 1415}
1372#endif 1416#endif
1373
1374/*
1375 * If the BIOS enumerates physical processors before logical,
1376 * maxcpus=N at enumeration-time can be used to disable HT.
1377 */
1378static int __init parse_maxcpus(char *arg)
1379{
1380 extern unsigned int maxcpus;
1381
1382 if (arg)
1383 maxcpus = simple_strtoul(arg, NULL, 0);
1384 return 0;
1385}
1386early_param("maxcpus", parse_maxcpus);
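
The smp_sanity_check() addition above does more than the warning it replaces in setup.c: it actively trims cpu_present_map and cpu_possible_map down to 8 CPUs so a CONFIG_X86_PC kernel degrades gracefully instead of misbehaving. A generic sketch of that "keep only the first N set bits" operation, using a plain 64-bit mask as a stand-in for a cpumask (clamp_mask is an illustrative name):

    #include <stdint.h>
    #include <stdio.h>

    /* Clear every set bit after the first 'limit' ones, as the two
     * for_each_present_cpu/for_each_possible_cpu loops above do. */
    static uint64_t clamp_mask(uint64_t mask, unsigned int limit)
    {
            unsigned int bit, seen = 0;

            for (bit = 0; bit < 64; bit++) {
                    if (!(mask & (1ULL << bit)))
                            continue;
                    if (seen >= limit)
                            mask &= ~(1ULL << bit);
                    seen++;
            }
            return mask;
    }

    int main(void)
    {
            /* 16 CPUs present, clamped to 8 -> 0xff */
            printf("0x%llx\n", (unsigned long long)clamp_mask(0xFFFF, 8));
            return 0;
    }
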
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
index 99941b37eca0..397e309839dd 100644
--- a/arch/x86/kernel/smpcommon.c
+++ b/arch/x86/kernel/smpcommon.c
@@ -8,18 +8,21 @@
8DEFINE_PER_CPU(unsigned long, this_cpu_off); 8DEFINE_PER_CPU(unsigned long, this_cpu_off);
9EXPORT_PER_CPU_SYMBOL(this_cpu_off); 9EXPORT_PER_CPU_SYMBOL(this_cpu_off);
10 10
11/* Initialize the CPU's GDT. This is either the boot CPU doing itself 11/*
12 (still using the master per-cpu area), or a CPU doing it for a 12 * Initialize the CPU's GDT. This is either the boot CPU doing itself
13 secondary which will soon come up. */ 13 * (still using the master per-cpu area), or a CPU doing it for a
14 * secondary which will soon come up.
15 */
14__cpuinit void init_gdt(int cpu) 16__cpuinit void init_gdt(int cpu)
15{ 17{
16 struct desc_struct *gdt = get_cpu_gdt_table(cpu); 18 struct desc_struct gdt;
17 19
18 pack_descriptor(&gdt[GDT_ENTRY_PERCPU], 20 pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF,
19 __per_cpu_offset[cpu], 0xFFFFF,
20 0x2 | DESCTYPE_S, 0x8); 21 0x2 | DESCTYPE_S, 0x8);
22 gdt.s = 1;
21 23
22 gdt[GDT_ENTRY_PERCPU].s = 1; 24 write_gdt_entry(get_cpu_gdt_table(cpu),
25 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
23 26
24 per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu]; 27 per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
25 per_cpu(cpu_number, cpu) = cpu; 28 per_cpu(cpu_number, cpu) = cpu;
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index d0fbb7712ab0..8b8c0d6640fa 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -17,6 +17,7 @@
17#include <asm/genapic.h> 17#include <asm/genapic.h>
18#include <asm/idle.h> 18#include <asm/idle.h>
19#include <asm/tsc.h> 19#include <asm/tsc.h>
20#include <asm/irq_vectors.h>
20 21
21#include <mach_apic.h> 22#include <mach_apic.h>
22 23
@@ -783,7 +784,7 @@ static int __init uv_bau_init(void)
783 uv_init_blade(blade, node, cur_cpu); 784 uv_init_blade(blade, node, cur_cpu);
784 cur_cpu += uv_blade_nr_possible_cpus(blade); 785 cur_cpu += uv_blade_nr_possible_cpus(blade);
785 } 786 }
786 set_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); 787 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
787 uv_enable_timeouts(); 788 uv_enable_timeouts();
788 789
789 return 0; 790 return 0;
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 3f18d73f420c..513caaca7115 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -1131,7 +1131,14 @@ asmlinkage void math_state_restore(void)
1131 } 1131 }
1132 1132
1133 clts(); /* Allow maths ops (or we recurse) */ 1133 clts(); /* Allow maths ops (or we recurse) */
1134 restore_fpu_checking(&me->thread.xstate->fxsave); 1134 /*
1135 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
1136 */
1137 if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) {
1138 stts();
1139 force_sig(SIGSEGV, me);
1140 return;
1141 }
1135 task_thread_info(me)->status |= TS_USEDFPU; 1142 task_thread_info(me)->status |= TS_USEDFPU;
1136 me->fpu_counter++; 1143 me->fpu_counter++;
1137} 1144}
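
The signal_64.c and traps_64.c hunks above apply the same defensive rule to FPU state: if restore_fpu_checking() fails, never continue with half-loaded registers; drop the task's saved state, and in the trap path re-arm the FPU trap and raise SIGSEGV. A portable sketch of that all-or-nothing restore pattern; the struct and function names here are inventions for illustration:

    #include <string.h>

    struct xstate { unsigned char bytes[512]; };    /* assumed stand-in type */

    /* All-or-nothing restore: if validation fails, wipe the destination
     * rather than leave it half-written, so the caller can signal the
     * task instead of running on corrupt state. */
    static int restore_state(struct xstate *dst, const struct xstate *src,
                             int (*valid)(const struct xstate *))
    {
            if (!valid(src)) {
                    memset(dst, 0, sizeof(*dst));
                    return -1;
            }
            memcpy(dst, src, sizeof(*dst));
            return 0;
    }
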
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7603c0553909..8f98e9de1b82 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,7 +104,7 @@ __setup("notsc", notsc_setup);
104/* 104/*
105 * Read TSC and the reference counters. Take care of SMI disturbance 105 * Read TSC and the reference counters. Take care of SMI disturbance
106 */ 106 */
107static u64 __init tsc_read_refs(u64 *pm, u64 *hpet) 107static u64 tsc_read_refs(u64 *pm, u64 *hpet)
108{ 108{
109 u64 t1, t2; 109 u64 t1, t2;
110 int i; 110 int i;
@@ -122,80 +122,216 @@ static u64 __init tsc_read_refs(u64 *pm, u64 *hpet)
122 return ULLONG_MAX; 122 return ULLONG_MAX;
123} 123}
124 124
125/** 125/*
126 * native_calibrate_tsc - calibrate the tsc on boot 126 * Try to calibrate the TSC against the Programmable
127 * Interrupt Timer and return the frequency of the TSC
128 * in kHz.
129 *
130 * Return ULONG_MAX on failure to calibrate.
127 */ 131 */
128unsigned long native_calibrate_tsc(void) 132static unsigned long pit_calibrate_tsc(void)
129{ 133{
130 unsigned long flags; 134 u64 tsc, t1, t2, delta;
131 u64 tsc1, tsc2, tr1, tr2, delta, pm1, pm2, hpet1, hpet2; 135 unsigned long tscmin, tscmax;
132 int hpet = is_hpet_enabled(); 136 int pitcnt;
133 unsigned int tsc_khz_val = 0;
134
135 local_irq_save(flags);
136
137 tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
138 137
138 /* Set the Gate high, disable speaker */
139 outb((inb(0x61) & ~0x02) | 0x01, 0x61); 139 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
140 140
141 /*
142 * Set up CTC channel 2 for mode 0 (interrupt on terminal
143 * count mode), binary count. Set the latch register to 50ms
144 * (LSB then MSB) to begin countdown.
145 */
141 outb(0xb0, 0x43); 146 outb(0xb0, 0x43);
142 outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); 147 outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
143 outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); 148 outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
144 tr1 = get_cycles();
145 while ((inb(0x61) & 0x20) == 0);
146 tr2 = get_cycles();
147 149
148 tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); 150 tsc = t1 = t2 = get_cycles();
149 151
150 local_irq_restore(flags); 152 pitcnt = 0;
153 tscmax = 0;
154 tscmin = ULONG_MAX;
155 while ((inb(0x61) & 0x20) == 0) {
156 t2 = get_cycles();
157 delta = t2 - tsc;
158 tsc = t2;
159 if ((unsigned long) delta < tscmin)
160 tscmin = (unsigned int) delta;
161 if ((unsigned long) delta > tscmax)
162 tscmax = (unsigned int) delta;
163 pitcnt++;
164 }
151 165
152 /* 166 /*
153 * Preset the result with the raw and inaccurate PIT 167 * Sanity checks:
154 * calibration value 168 *
169 * If we were not able to read the PIT more than 5000
170 * times, then we have been hit by a massive SMI
171 *
172 * If the maximum is 10 times larger than the minimum,
173 * then we got hit by an SMI as well.
155 */ 174 */
156 delta = (tr2 - tr1); 175 if (pitcnt < 5000 || tscmax > 10 * tscmin)
176 return ULONG_MAX;
177
178 /* Calculate the PIT value */
179 delta = t2 - t1;
157 do_div(delta, 50); 180 do_div(delta, 50);
158 tsc_khz_val = delta; 181 return delta;
182}
183
184
185/**
186 * native_calibrate_tsc - calibrate the tsc on boot
187 */
188unsigned long native_calibrate_tsc(void)
189{
190 u64 tsc1, tsc2, delta, pm1, pm2, hpet1, hpet2;
191 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
192 unsigned long flags;
193 int hpet = is_hpet_enabled(), i;
194
195 /*
196 * Run 5 calibration loops to get the lowest frequency value
197 * (the best estimate). We use two different calibration modes
198 * here:
199 *
200 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
201 * load a timeout of 50ms. We read the time right after we
202 * started the timer and wait until the PIT count down reaches
203 * zero. In each wait loop iteration we read the TSC and check
204 * the delta to the previous read. We keep track of the min
205 * and max values of that delta. The delta is mostly defined
206 * by the IO time of the PIT access, so we can detect when an
207 * SMI/SMM disturbance happened between the two reads. If the
208 * maximum time is significantly larger than the minimum time,
209 * then we discard the result and have another try.
210 *
211 * 2) Reference counter. If available we use the HPET or the
212 * PMTIMER as a reference to check the sanity of that value.
213 * We use separate TSC readouts and check inside of the
214 * reference read for an SMI/SMM disturbance. We discard
215 * disturbed values here as well. We do that around the PIT
216 * calibration delay loop as we have to wait for a certain
217 * amount of time anyway.
218 */
219 for (i = 0; i < 5; i++) {
220 unsigned long tsc_pit_khz;
221
222 /*
223 * Read the start value and the reference count of
224 * hpet/pmtimer when available. Then do the PIT
225 * calibration, which will take at least 50ms, and
226 * read the end value.
227 */
228 local_irq_save(flags);
229 tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
230 tsc_pit_khz = pit_calibrate_tsc();
231 tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
232 local_irq_restore(flags);
233
234 /* Pick the lowest PIT TSC calibration so far */
235 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
236
237 /* hpet or pmtimer available ? */
238 if (!hpet && !pm1 && !pm2)
239 continue;
240
241 /* Check whether the sampling was disturbed by an SMI */
242 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
243 continue;
244
245 tsc2 = (tsc2 - tsc1) * 1000000LL;
246
247 if (hpet) {
248 if (hpet2 < hpet1)
249 hpet2 += 0x100000000ULL;
250 hpet2 -= hpet1;
251 tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
252 do_div(tsc1, 1000000);
253 } else {
254 if (pm2 < pm1)
255 pm2 += (u64)ACPI_PM_OVRRUN;
256 pm2 -= pm1;
257 tsc1 = pm2 * 1000000000LL;
258 do_div(tsc1, PMTMR_TICKS_PER_SEC);
259 }
260
261 do_div(tsc2, tsc1);
262 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
263 }
264
265 /*
266 * Now check the results.
267 */
268 if (tsc_pit_min == ULONG_MAX) {
269 /* PIT gave no useful value */
270 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
271
272 /* We don't have an alternative source, disable TSC */
273 if (!hpet && !pm1 && !pm2) {
274 printk(KERN_WARNING "TSC: No reference (HPET/PMTIMER) available\n");
275 return 0;
276 }
277
278 /* The alternative source failed as well, disable TSC */
279 if (tsc_ref_min == ULONG_MAX) {
280 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
281 "failed due to SMI disturbance.\n");
282 return 0;
283 }
284
285 /* Use the alternative source */
286 printk(KERN_INFO "TSC: using %s reference calibration\n",
287 hpet ? "HPET" : "PMTIMER");
288
289 return tsc_ref_min;
290 }
159 291
160 /* hpet or pmtimer available ? */ 292 /* We don't have an alternative source, use the PIT calibration value */
161 if (!hpet && !pm1 && !pm2) { 293 if (!hpet && !pm1 && !pm2) {
162 printk(KERN_INFO "TSC calibrated against PIT\n"); 294 printk(KERN_INFO "TSC: Using PIT calibration value\n");
163 goto out; 295 return tsc_pit_min;
164 } 296 }
165 297
166 /* Check, whether the sampling was disturbed by an SMI */ 298 /* The alternative source failed, use the PIT calibration value */
167 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) { 299 if (tsc_ref_min == ULONG_MAX) {
168 printk(KERN_WARNING "TSC calibration disturbed by SMI, " 300 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed due "
169 "using PIT calibration result\n"); 301 "to SMI disturbance. Using PIT calibration\n");
170 goto out; 302 return tsc_pit_min;
171 } 303 }
172 304
173 tsc2 = (tsc2 - tsc1) * 1000000LL; 305 /* Check the reference deviation */
174 306 delta = ((u64) tsc_pit_min) * 100;
175 if (hpet) { 307 do_div(delta, tsc_ref_min);
176 printk(KERN_INFO "TSC calibrated against HPET\n"); 308
177 if (hpet2 < hpet1) 309 /*
178 hpet2 += 0x100000000ULL; 310 * If both calibration results are inside a 5% window, then we
179 hpet2 -= hpet1; 311 * use the lower frequency of those as it is probably the
180 tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); 312 * closest estimate.
181 do_div(tsc1, 1000000); 313 */
182 } else { 314 if (delta >= 95 && delta <= 105) {
183 printk(KERN_INFO "TSC calibrated against PM_TIMER\n"); 315 printk(KERN_INFO "TSC: PIT calibration confirmed by %s.\n",
184 if (pm2 < pm1) 316 hpet ? "HPET" : "PMTIMER");
185 pm2 += (u64)ACPI_PM_OVRRUN; 317 printk(KERN_INFO "TSC: using %s calibration value\n",
186 pm2 -= pm1; 318 tsc_pit_min <= tsc_ref_min ? "PIT" :
187 tsc1 = pm2 * 1000000000LL; 319 hpet ? "HPET" : "PMTIMER");
188 do_div(tsc1, PMTMR_TICKS_PER_SEC); 320 return tsc_pit_min <= tsc_ref_min ? tsc_pit_min : tsc_ref_min;
189 } 321 }
190 322
191 do_div(tsc2, tsc1); 323 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
192 tsc_khz_val = tsc2; 324 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
193 325
194out: 326 /*
195 return tsc_khz_val; 327 * The calibration values differ too much. When in doubt, we use
328 * the PIT value as we know that there are PMTIMERs around
329 * running at double speed.
330 */
331 printk(KERN_INFO "TSC: Using PIT calibration value\n");
332 return tsc_pit_min;
196} 333}
197 334
198
199#ifdef CONFIG_X86_32 335#ifdef CONFIG_X86_32
200/* Only called from the Powernow K7 cpu freq driver */ 336/* Only called from the Powernow K7 cpu freq driver */
201int recalibrate_cpu_khz(void) 337int recalibrate_cpu_khz(void)
@@ -314,7 +450,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
314 mark_tsc_unstable("cpufreq changes"); 450 mark_tsc_unstable("cpufreq changes");
315 } 451 }
316 452
317 set_cyc2ns_scale(tsc_khz_ref, freq->cpu); 453 set_cyc2ns_scale(tsc_khz, freq->cpu);
318 454
319 return 0; 455 return 0;
320} 456}
@@ -325,6 +461,10 @@ static struct notifier_block time_cpufreq_notifier_block = {
325 461
326static int __init cpufreq_tsc(void) 462static int __init cpufreq_tsc(void)
327{ 463{
464 if (!cpu_has_tsc)
465 return 0;
466 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
467 return 0;
328 cpufreq_register_notifier(&time_cpufreq_notifier_block, 468 cpufreq_register_notifier(&time_cpufreq_notifier_block,
329 CPUFREQ_TRANSITION_NOTIFIER); 469 CPUFREQ_TRANSITION_NOTIFIER);
330 return 0; 470 return 0;
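
The rewritten native_calibrate_tsc() above takes the best (lowest) of five PIT runs, computes a second estimate from the HPET or the ACPI PM timer, and only trusts the pair when they agree within 5%, i.e. when the PIT value falls between 95 and 105 percent of the reference value. A standalone sketch of that acceptance test, with plain division standing in for do_div() and example frequencies in kHz:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the "delta >= 95 && delta <= 105" window above. */
    static int calibrations_agree(uint64_t pit_khz, uint64_t ref_khz)
    {
            uint64_t delta = pit_khz * 100 / ref_khz; /* percent of reference */

            return delta >= 95 && delta <= 105;
    }

    int main(void)
    {
            printf("%d\n", calibrations_agree(2400000, 2398000)); /* 1 */
            /* A PMTIMER running at double speed halves the reference
             * estimate, so the pair is rejected: */
            printf("%d\n", calibrations_agree(2400000, 1200000)); /* 0 */
            return 0;
    }
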
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 0577825cf89b..9ffb01c31c40 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -88,11 +88,9 @@ static __cpuinit void check_tsc_warp(void)
88 __raw_spin_unlock(&sync_lock); 88 __raw_spin_unlock(&sync_lock);
89 } 89 }
90 } 90 }
91 if (!(now-start)) { 91 WARN(!(now-start),
92 printk("Warning: zero tsc calibration delta: %Ld [max: %Ld]\n", 92 "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
93 now-start, end-start); 93 now-start, end-start);
94 WARN_ON(1);
95 }
96} 94}
97 95
98/* 96/*
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 41e01b145c48..594ef47f0a63 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -184,8 +184,6 @@ static int __init visws_get_smp_config(unsigned int early)
184 return 1; 184 return 1;
185} 185}
186 186
187extern unsigned int __cpuinitdata maxcpus;
188
189/* 187/*
190 * The Visual Workstation is Intel MP compliant in the hardware 188 * The Visual Workstation is Intel MP compliant in the hardware
191 * sense, but it doesn't have a BIOS(-configuration table). 189 * sense, but it doesn't have a BIOS(-configuration table).
@@ -244,8 +242,8 @@ static int __init visws_find_smp_config(unsigned int reserve)
244 ncpus = CO_CPU_MAX; 242 ncpus = CO_CPU_MAX;
245 } 243 }
246 244
247 if (ncpus > maxcpus) 245 if (ncpus > setup_max_cpus)
248 ncpus = maxcpus; 246 ncpus = setup_max_cpus;
249 247
250#ifdef CONFIG_X86_LOCAL_APIC 248#ifdef CONFIG_X86_LOCAL_APIC
251 smp_found_config = 1; 249 smp_found_config = 1;
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 0a1b1a9d922d..6ca515d6db54 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -37,6 +37,7 @@
37#include <asm/timer.h> 37#include <asm/timer.h>
38#include <asm/vmi_time.h> 38#include <asm/vmi_time.h>
39#include <asm/kmap_types.h> 39#include <asm/kmap_types.h>
40#include <asm/setup.h>
40 41
41/* Convenient for calling VMI functions indirectly in the ROM */ 42/* Convenient for calling VMI functions indirectly in the ROM */
42typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void); 43typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
@@ -683,7 +684,7 @@ void vmi_bringup(void)
683{ 684{
684 /* We must establish the lowmem mapping for MMU ops to work */ 685 /* We must establish the lowmem mapping for MMU ops to work */
685 if (vmi_ops.set_linear_mapping) 686 if (vmi_ops.set_linear_mapping)
686 vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0); 687 vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
687} 688}
688 689
689/* 690/*
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index cdb2363697d2..af5bdad84604 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -209,3 +209,11 @@ SECTIONS
209 209
210 DWARF_DEBUG 210 DWARF_DEBUG
211} 211}
212
213#ifdef CONFIG_KEXEC
214/* Link time checks */
215#include <asm/kexec.h>
216
217ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
218 "kexec control code size is too big")
219#endif
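
This ASSERT pairs with the relocate_kernel_32.S hunk earlier, which exports the control code size as an absolute symbol: if the relocation code ever outgrows KEXEC_CONTROL_CODE_MAX_SIZE and would overwrite the jump-back data stored after it, the build now fails at link time instead of corrupting memory at kexec time. A generic version of the assemble-time-size plus link-time-check pattern, with illustrative file and symbol names:

    /* region.S: measure a code region at assembly time */
            .globl  my_code_start
    my_code_start:
            /* ... code that must fit in the reserved slot ... */

            .globl  my_code_size
            .set    my_code_size, . - my_code_start

    /* region.lds: fail the link if the region outgrew its slot */
    ASSERT(my_code_size <= 4096, "my_code is too big")
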