Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/common.c                 |  1
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c   | 21
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c    | 44
-rw-r--r--  arch/x86/kernel/cpu/hypervisor.c             | 14
-rw-r--r--  arch/x86/kernel/cpu/vmware.c                 | 21
5 files changed, 52 insertions, 49 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2055fc2b2e6b..2fea97eccf77 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -34,7 +34,6 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/pat.h>
-#include <linux/smp.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 479cc8c418c1..7d5c3b0ea8da 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -523,6 +523,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 	},
 	{ }
 };
+
+static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+{
+	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	 * AL30: A Machine Check Exception (MCE) Occurring during an
+	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+	 * Both Processor Cores to Lock Up when HT is enabled */
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		if ((c->x86 == 15) &&
+		    (c->x86_model == 6) &&
+		    (c->x86_mask == 8) && smt_capable())
+			return -ENODEV;
+	}
+	return 0;
+}
 #endif
 
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -537,6 +552,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
+#ifdef CONFIG_SMP
+	result = acpi_cpufreq_blacklist(c);
+	if (result)
+		return result;
+#endif
+
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 2a50ef891000..6394aa5c7985 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -605,9 +605,10 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
 	return 0;
 }
 
-static void invalidate_entry(struct powernow_k8_data *data, unsigned int entry)
+static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
+		unsigned int entry)
 {
-	data->powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
+	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
 }
 
 static void print_basics(struct powernow_k8_data *data)
@@ -854,6 +855,10 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 		goto err_out;
 	}
 
+	/* fill in data */
+	data->numps = data->acpi_data.state_count;
+	powernow_k8_acpi_pst_values(data, 0);
+
 	if (cpu_family == CPU_HW_PSTATE)
 		ret_val = fill_powernow_table_pstate(data, powernow_table);
 	else
@@ -866,11 +871,8 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	powernow_table[data->acpi_data.state_count].index = 0;
 	data->powernow_table = powernow_table;
 
-	/* fill in data */
-	data->numps = data->acpi_data.state_count;
 	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
-	powernow_k8_acpi_pst_values(data, 0);
 
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
@@ -914,13 +916,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 					"bad value %d.\n", i, index);
 			printk(KERN_ERR PFX "Please report to BIOS "
 					"manufacturer\n");
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 		rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
 		if (!(hi & HW_PSTATE_VALID_MASK)) {
 			dprintk("invalid pstate %d, ignoring\n", index);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
@@ -941,7 +943,6 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		struct cpufreq_frequency_table *powernow_table)
 {
 	int i;
-	int cntlofreq = 0;
 
 	for (i = 0; i < data->acpi_data.state_count; i++) {
 		u32 fid;
@@ -970,7 +971,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		/* verify frequency is OK */
 		if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
 			dprintk("invalid freq %u kHz, ignoring\n", freq);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
@@ -978,38 +979,17 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		 * BIOSs are using "off" to indicate invalid */
 		if (vid == VID_OFF) {
 			dprintk("invalid vid %u, ignoring\n", vid);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
-		/* verify only 1 entry from the lo frequency table */
-		if (fid < HI_FID_TABLE_BOTTOM) {
-			if (cntlofreq) {
-				/* if both entries are the same,
-				 * ignore this one ... */
-				if ((freq != powernow_table[cntlofreq].frequency) ||
-				    (index != powernow_table[cntlofreq].index)) {
-					printk(KERN_ERR PFX
-						"Too many lo freq table "
-						"entries\n");
-					return 1;
-				}
-
-				dprintk("double low frequency table entry, "
-					"ignoring it.\n");
-				invalidate_entry(data, i);
-				continue;
-			} else
-				cntlofreq = i;
-		}
-
 		if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
 			printk(KERN_INFO PFX "invalid freq entries "
 				"%u kHz vs. %u kHz\n", freq,
 				(unsigned int)
 				(data->acpi_data.states[i].core_frequency
 				* 1000));
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 	}
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 93ba8eeb100a..08be922de33a 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -34,13 +34,6 @@ detect_hypervisor_vendor(struct cpuinfo_x86 *c)
 		c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
 }
 
-unsigned long get_hypervisor_tsc_freq(void)
-{
-	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
-		return vmware_get_tsc_khz();
-	return 0;
-}
-
 static inline void __cpuinit
 hypervisor_set_feature_bits(struct cpuinfo_x86 *c)
 {
@@ -55,3 +48,10 @@ void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
 	detect_hypervisor_vendor(c);
 	hypervisor_set_feature_bits(c);
 }
+
+void __init init_hypervisor_platform(void)
+{
+	init_hypervisor(&boot_cpu_data);
+	if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
+		vmware_platform_setup();
+}
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index bc24f514ec93..0a46b4df5d80 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -24,6 +24,7 @@
 #include <linux/dmi.h>
 #include <asm/div64.h>
 #include <asm/vmware.h>
+#include <asm/x86_init.h>
 
 #define CPUID_VMWARE_INFO_LEAF	0x40000000
 #define VMWARE_HYPERVISOR_MAGIC	0x564D5868
@@ -47,21 +48,29 @@ static inline int __vmware_platform(void)
 	return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
 }
 
-static unsigned long __vmware_get_tsc_khz(void)
+static unsigned long vmware_get_tsc_khz(void)
 {
 	uint64_t tsc_hz;
 	uint32_t eax, ebx, ecx, edx;
 
 	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
 
-	if (ebx == UINT_MAX)
-		return 0;
 	tsc_hz = eax | (((uint64_t)ebx) << 32);
 	do_div(tsc_hz, 1000);
 	BUG_ON(tsc_hz >> 32);
 	return tsc_hz;
 }
 
+void __init vmware_platform_setup(void)
+{
+	uint32_t eax, ebx, ecx, edx;
+
+	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+
+	if (ebx != UINT_MAX)
+		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
+}
+
 /*
  * While checking the dmi string infomation, just checking the product
  * serial key should be enough, as this will always have a VMware
@@ -87,12 +96,6 @@ int vmware_platform(void)
 	return 0;
 }
 
-unsigned long vmware_get_tsc_khz(void)
-{
-	BUG_ON(!vmware_platform());
-	return __vmware_get_tsc_khz();
-}
-
 /*
  * VMware hypervisor takes care of exporting a reliable TSC to the guest.
  * Still, due to timing difference when running on virtual cpus, the TSC can