author	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-06 13:02:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-06 13:02:36 -0400
commit	0f477dd0851bdcee82923da66a7fc4a44cb1bc3d (patch)
tree	cf8937ba8c7800c6143aa3c9758c6e184c1289ec
parent	c4efd6b569b2646e1346a08a4c40286f8bcb5f11 (diff)
parent	e8c534ec068af1a0845aceda373a9bfd2de62030 (diff)
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Fix keeping track of AMD C1E
  x86, cpu: Package Level Thermal Control, Power Limit Notification definitions
  x86, cpu: Export AMD errata definitions
  x86, cpu: Use AMD errata checking framework for erratum 383
  x86, cpu: Clean up AMD erratum 400 workaround
  x86, cpu: AMD errata checking framework
  x86, cpu: Split addon_cpuid_features.c
  x86, cpu: Clean up formatting in cpufeature.h, remove override
  x86, cpu: Enumerate xsaveopt
  x86, cpu: Add xsaveopt cpufeature
  x86, cpu: Make init_scattered_cpuid_features() consider cpuid subleaves
  x86, cpu: Support the features flags in new CPUID leaf 7
  x86, cpu: Add CPU flags for F16C and RDRND
  x86: Look for IA32_ENERGY_PERF_BIAS support
  x86, AMD: Extend support to future families
  x86, cacheinfo: Carve out L3 cache slot accessors
  x86, xsave: Cleanup return codes in check_for_xstate()
-rw-r--r--	arch/x86/include/asm/acpi.h	2
-rw-r--r--	arch/x86/include/asm/cpufeature.h	29
-rw-r--r--	arch/x86/include/asm/msr-index.h	19
-rw-r--r--	arch/x86/include/asm/processor.h	21
-rw-r--r--	arch/x86/include/asm/required-features.h	2
-rw-r--r--	arch/x86/kernel/cpu/Makefile	2
-rw-r--r--	arch/x86/kernel/cpu/amd.c	77
-rw-r--r--	arch/x86/kernel/cpu/common.c	10
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c	108
-rw-r--r--	arch/x86/kernel/cpu/scattered.c	63
-rw-r--r--	arch/x86/kernel/cpu/topology.c (renamed from arch/x86/kernel/cpu/addon_cpuid_features.c)	58
-rw-r--r--	arch/x86/kernel/process.c	45
-rw-r--r--	arch/x86/kernel/xsave.c	13
-rw-r--r--	arch/x86/kvm/svm.c	3
-rw-r--r--	drivers/acpi/processor_idle.c	2
15 files changed, 311 insertions(+), 143 deletions(-)
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index aa2c39d968fc..92091de11113 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -134,7 +134,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 	    boot_cpu_data.x86_model <= 0x05 &&
 	    boot_cpu_data.x86_mask < 0x0A)
 		return 1;
-	else if (boot_cpu_has(X86_FEATURE_AMDC1E))
+	else if (c1e_detected)
 		return 1;
 	else
 		return max_cstate;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 468145914389..0b205b8a4308 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -6,7 +6,7 @@

 #include <asm/required-features.h>

-#define NCAPINTS	9	/* N 32-bit words worth of info */
+#define NCAPINTS	10	/* N 32-bit words worth of info */

 /*
  * Note: If the comment begins with a quoted string, that string is used
@@ -89,7 +89,7 @@
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_AMDC1E	(3*32+21) /* AMD C1E detected */
+					  /* 21 available, was AMD_C1E */
 #define X86_FEATURE_XTOPOLOGY	(3*32+22) /* cpu topology enum extensions */
 #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
 #define X86_FEATURE_NONSTOP_TSC	(3*32+24) /* TSC does not stop in C states */
@@ -124,6 +124,8 @@
 #define X86_FEATURE_XSAVE	(4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
 #define X86_FEATURE_OSXSAVE	(4*32+27) /* "" XSAVE enabled in the OS */
 #define X86_FEATURE_AVX		(4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C	(4*32+29) /* 16-bit fp conversions */
+#define X86_FEATURE_RDRND	(4*32+30) /* The RDRAND instruction */
 #define X86_FEATURE_HYPERVISOR	(4*32+31) /* Running on a hypervisor */

 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
@@ -157,22 +159,29 @@

 /*
  * Auxiliary flags: Linux defined - For features scattered in various
- * CPUID levels like 0x6, 0xA etc
+ * CPUID levels like 0x6, 0xA etc, word 7
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
 #define X86_FEATURE_ARAT	(7*32+ 1) /* Always Running APIC Timer */
 #define X86_FEATURE_CPB		(7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB		(7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_XSAVEOPT	(7*32+ 4) /* Optimized Xsave */
+#define X86_FEATURE_PLN		(7*32+ 5) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */

-/* Virtualization flags: Linux defined */
+/* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI	(8*32+ 1) /* Intel Virtual NMI */
 #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
 #define X86_FEATURE_EPT		(8*32+ 3) /* Intel Extended Page Table */
 #define X86_FEATURE_VPID	(8*32+ 4) /* Intel Virtual Processor ID */
-#define X86_FEATURE_NPT		(8*32+5)  /* AMD Nested Page Table support */
-#define X86_FEATURE_LBRV	(8*32+6)  /* AMD LBR Virtualization support */
-#define X86_FEATURE_SVML	(8*32+7)  /* "svm_lock" AMD SVM locking MSR */
-#define X86_FEATURE_NRIPS	(8*32+8)  /* "nrip_save" AMD SVM next_rip save */
+#define X86_FEATURE_NPT		(8*32+ 5) /* AMD Nested Page Table support */
+#define X86_FEATURE_LBRV	(8*32+ 6) /* AMD LBR Virtualization support */
+#define X86_FEATURE_SVML	(8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
+#define X86_FEATURE_NRIPS	(8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+#define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/

 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)

@@ -194,7 +203,9 @@ extern const char * const x86_power_flags[32];
 	 (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) ||	\
 	 (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) ||	\
 	 (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||	\
-	 (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) )	\
+	 (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ||	\
+	 (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) ||	\
+	 (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )	\
 	 ? 1 :								\
 	 test_cpu_cap(c, bit))

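A note on the dispatch above: every X86_FEATURE_* value encodes its capability word in bits 5 and up and its bit position in the low five bits, which is why the macro switches on (bit)>>5 and masks with (bit)&31. A minimal standalone sketch of the same arithmetic, using the X86_FEATURE_XSAVEOPT encoding from word 7 (plain userspace C, not kernel API):

	#include <stdio.h>

	/* Same encoding as the kernel's X86_FEATURE_* constants: word*32 + bit. */
	#define FEATURE_XSAVEOPT (7 * 32 + 4)

	int main(void)
	{
		unsigned int word = FEATURE_XSAVEOPT >> 5;	   /* capability word: 7 */
		unsigned int mask = 1u << (FEATURE_XSAVEOPT & 31); /* bit 4 -> 0x10 */

		printf("word %u, mask 0x%x\n", word, mask);
		return 0;
	}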
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 509a42187dc2..2eeb2e692008 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -226,12 +226,14 @@
 #define MSR_IA32_THERM_CONTROL		0x0000019a
 #define MSR_IA32_THERM_INTERRUPT	0x0000019b

-#define THERM_INT_LOW_ENABLE		(1 << 0)
-#define THERM_INT_HIGH_ENABLE		(1 << 1)
+#define THERM_INT_HIGH_ENABLE		(1 << 0)
+#define THERM_INT_LOW_ENABLE		(1 << 1)
+#define THERM_INT_PLN_ENABLE		(1 << 24)

 #define MSR_IA32_THERM_STATUS		0x0000019c

 #define THERM_STATUS_PROCHOT		(1 << 0)
+#define THERM_STATUS_POWER_LIMIT	(1 << 10)

 #define MSR_THERM2_CTL			0x0000019d

@@ -241,6 +243,19 @@

 #define MSR_IA32_TEMPERATURE_TARGET	0x000001a2

+#define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
+
+#define MSR_IA32_PACKAGE_THERM_STATUS		0x000001b1
+
+#define PACKAGE_THERM_STATUS_PROCHOT		(1 << 0)
+#define PACKAGE_THERM_STATUS_POWER_LIMIT	(1 << 10)
+
+#define MSR_IA32_PACKAGE_THERM_INTERRUPT	0x000001b2
+
+#define PACKAGE_THERM_INT_HIGH_ENABLE		(1 << 0)
+#define PACKAGE_THERM_INT_LOW_ENABLE		(1 << 1)
+#define PACKAGE_THERM_INT_PLN_ENABLE		(1 << 24)
+
 /* MISC_ENABLE bits: architectural */
 #define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
 #define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
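The package-level MSRs added here mirror the per-core THERM_STATUS/THERM_INTERRUPT layout: PROCHOT in bit 0, the power-limit status in bit 10, the PLN enable in bit 24. A hedged sketch of how a driver might poll the new status register (check_package_thermal_status is a made-up helper name, and a real caller should first confirm the CPU sets X86_FEATURE_PTS):

	/* Hypothetical helper: report package thermal conditions. */
	static void check_package_thermal_status(void)
	{
		u32 lo, hi;

		rdmsr(MSR_IA32_PACKAGE_THERM_STATUS, lo, hi);

		if (lo & PACKAGE_THERM_STATUS_PROCHOT)
			pr_info("package PROCHOT asserted\n");
		if (lo & PACKAGE_THERM_STATUS_POWER_LIMIT)
			pr_info("package power limit asserted\n");
	}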
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7e5c6a60b8ee..325b7bdbebaa 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -762,6 +762,7 @@ extern void init_c1e_mask(void);
 extern unsigned long boot_option_idle_override;
 extern unsigned long idle_halt;
 extern unsigned long idle_nomwait;
+extern bool c1e_detected;

 /*
  * on systems with caches, caches must be flushed as the absolute
@@ -1025,4 +1026,24 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
 	return ratio;
 }

+/*
+ * AMD errata checking
+ */
+#ifdef CONFIG_CPU_SUP_AMD
+extern const int amd_erratum_383[];
+extern const int amd_erratum_400[];
+extern bool cpu_has_amd_erratum(const int *);
+
+#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
+#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
+	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
+#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
+#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
+#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
+
+#else
+#define cpu_has_amd_erratum(x)	(false)
+#endif /* CONFIG_CPU_SUP_AMD */
+
 #endif /* _ASM_X86_PROCESSOR_H */
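To see how the packing above works, expand the first range of amd_erratum_400 (defined in the amd.c hunk below) by hand; the family lands in bits 24-31, and each endpoint packs (model << 4) | stepping into a 12-bit field:

	AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf)
	  = (0xf << 24) | (0x41 << 16) | (0x2 << 12) | (0xff << 4) | 0xf
	  = 0x0f412fff

	AMD_MODEL_RANGE_FAMILY(0x0f412fff) = 0x0f
	AMD_MODEL_RANGE_START(0x0f412fff)  = 0x412	/* model 0x41, stepping 0x2 */
	AMD_MODEL_RANGE_END(0x0f412fff)    = 0xfff	/* model 0xff, stepping 0xf */

This is why cpu_has_amd_erratum() can compare a single combined model/stepping value directly against the range endpoints.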
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 64cf2d24fad1..6c7fc25f2c34 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -84,5 +84,7 @@
 #define REQUIRED_MASK5	0
 #define REQUIRED_MASK6	0
 #define REQUIRED_MASK7	0
+#define REQUIRED_MASK8	0
+#define REQUIRED_MASK9	0

 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 3a785da34b6f..5e3a3512ba05 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -12,7 +12,7 @@ endif
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_common.o		:= $(nostackp)

-obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
+obj-y			:= intel_cacheinfo.o scattered.o topology.o
 obj-y			+= proc.o capflags.o powerflags.o common.o
 obj-y			+= vmware.o hypervisor.o sched.o mshyperv.o

diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e485825130d2..60a57b13082d 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -466,7 +466,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}

 	}
-	if (c->x86 == 0x10 || c->x86 == 0x11)
+	if (c->x86 >= 0x10)
 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

 	/* get apicid instead of initial apic id from cpuid */
@@ -529,7 +529,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		num_cache_leaves = 3;
 	}

-	if (c->x86 >= 0xf && c->x86 <= 0x11)
+	if (c->x86 >= 0xf)
 		set_cpu_cap(c, X86_FEATURE_K8);

 	if (cpu_has_xmm2) {
@@ -546,7 +546,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		fam10h_check_enable_mmcfg();
 	}

-	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
+	if (c == &boot_cpu_data && c->x86 >= 0xf) {
 		unsigned long long tseg;

 		/*
@@ -609,3 +609,74 @@ static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
 };

 cpu_dev_register(amd_cpu_dev);
+
+/*
+ * AMD errata checking
+ *
+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
+ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
+ * have an OSVW id assigned, which it takes as first argument. Both take a
+ * variable number of family-specific model-stepping ranges created by
+ * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
+ * int[] in arch/x86/include/asm/processor.h.
+ *
+ * Example:
+ *
+ * const int amd_erratum_319[] =
+ *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
+ *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
+ *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
+ */
+
+const int amd_erratum_400[] =
+	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
+			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
+EXPORT_SYMBOL_GPL(amd_erratum_400);
+
+const int amd_erratum_383[] =
+	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+EXPORT_SYMBOL_GPL(amd_erratum_383);
+
+bool cpu_has_amd_erratum(const int *erratum)
+{
+	struct cpuinfo_x86 *cpu = &current_cpu_data;
+	int osvw_id = *erratum++;
+	u32 range;
+	u32 ms;
+
+	/*
+	 * If called early enough that current_cpu_data hasn't been initialized
+	 * yet, fall back to boot_cpu_data.
+	 */
+	if (cpu->x86 == 0)
+		cpu = &boot_cpu_data;
+
+	if (cpu->x86_vendor != X86_VENDOR_AMD)
+		return false;
+
+	if (osvw_id >= 0 && osvw_id < 65536 &&
+	    cpu_has(cpu, X86_FEATURE_OSVW)) {
+		u64 osvw_len;
+
+		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
+		if (osvw_id < osvw_len) {
+			u64 osvw_bits;
+
+			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
+			       osvw_bits);
+			return osvw_bits & (1ULL << (osvw_id & 0x3f));
+		}
+	}
+
+	/* OSVW unavailable or ID unknown, match family-model-stepping range */
+	ms = (cpu->x86_model << 4) | cpu->x86_mask;
+	while ((range = *erratum++))
+		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+		    (ms >= AMD_MODEL_RANGE_START(range)) &&
+		    (ms <= AMD_MODEL_RANGE_END(range)))
+			return true;
+
+	return false;
+}
+
+EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
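Adding a new erratum to the framework is then just a table entry plus the extern declaration in processor.h. A hypothetical sketch (amd_erratum_example and its range are invented for illustration; a legacy erratum has no OSVW id, so the matcher falls straight through to the model-range check):

	const int amd_erratum_example[] =
		AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x0, 0x4, 0xf));

Callers test it with cpu_has_amd_erratum(amd_erratum_example), exactly as the process.c and svm.c hunks below do for errata 400 and 383.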
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 68e4a6f2211e..c7358303d8cd 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -551,6 +551,16 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_capability[4] = excap;
 	}

+	/* Additional Intel-defined flags: level 0x00000007 */
+	if (c->cpuid_level >= 0x00000007) {
+		u32 eax, ebx, ecx, edx;
+
+		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
+
+		if (eax > 0)
+			c->x86_capability[9] = ebx;
+	}
+
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
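Capability word 9 is simply the raw EBX output of CPUID leaf 7, subleaf 0, guarded by the EAX check above. The same leaf can be inspected from userspace; a minimal sketch using inline asm (GCC/Clang syntax, assumes an x86 build and a CPU whose maximum standard leaf is at least 7):

	#include <stdio.h>

	int main(void)
	{
		unsigned int eax = 7, ebx, ecx = 0, edx;

		/* CPUID leaf 7, subleaf 0: EBX carries the word-9 feature bits. */
		__asm__ volatile("cpuid"
				 : "+a"(eax), "=b"(ebx), "+c"(ecx), "=d"(edx));

		printf("leaf 7 ebx = 0x%08x (bit 0 = FSGSBASE)\n", ebx);
		return 0;
	}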
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 33eae2062cf5..898c2f4eab88 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -347,8 +347,8 @@ static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
 	return l3;
 }

-static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
+					   int index)
 {
 	int node;

@@ -396,20 +396,39 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 	this_leaf->l3 = l3_caches[node];
 }

+/*
+ * check whether a slot used for disabling an L3 index is occupied.
+ * @l3: L3 cache descriptor
+ * @slot: slot number (0..1)
+ *
+ * @returns: the disabled index if used or negative value if slot free.
+ */
+int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
+{
+	unsigned int reg = 0;
+
+	pci_read_config_dword(l3->dev, 0x1BC + slot * 4, &reg);
+
+	/* check whether this slot is activated already */
+	if (reg & (3UL << 30))
+		return reg & 0xfff;
+
+	return -1;
+}
+
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 				  unsigned int slot)
 {
-	struct pci_dev *dev = this_leaf->l3->dev;
-	unsigned int reg = 0;
+	int index;

 	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
 		return -EINVAL;

-	if (!dev)
-		return -EINVAL;
+	index = amd_get_l3_disable_slot(this_leaf->l3, slot);
+	if (index >= 0)
+		return sprintf(buf, "%d\n", index);

-	pci_read_config_dword(dev, 0x1BC + slot * 4, &reg);
-	return sprintf(buf, "0x%08x\n", reg);
+	return sprintf(buf, "FREE\n");
 }

 #define SHOW_CACHE_DISABLE(slot)					\
@@ -451,37 +470,74 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
 	}
 }

-
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
-				   const char *buf, size_t count,
-				   unsigned int slot)
+/*
+ * disable a L3 cache index by using a disable-slot
+ *
+ * @l3:    L3 cache descriptor
+ * @cpu:   A CPU on the node containing the L3 cache
+ * @slot:  slot number (0..1)
+ * @index: index to disable
+ *
+ * @return: 0 on success, error status on failure
+ */
+int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
+			    unsigned long index)
 {
-	struct pci_dev *dev = this_leaf->l3->dev;
-	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-	unsigned long val = 0;
+	int ret = 0;

 #define SUBCACHE_MASK	(3UL << 20)
 #define SUBCACHE_INDEX	0xfff

-	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+	/*
+	 * check whether this slot is already used or
+	 * the index is already disabled
+	 */
+	ret = amd_get_l3_disable_slot(l3, slot);
+	if (ret >= 0)
 		return -EINVAL;

+	/*
+	 * check whether the other slot has disabled the
+	 * same index already
+	 */
+	if (index == amd_get_l3_disable_slot(l3, !slot))
+		return -EINVAL;
+
+	/* do not allow writes outside of allowed bits */
+	if ((index & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
+	    ((index & SUBCACHE_INDEX) > l3->indices))
+		return -EINVAL;
+
+	amd_l3_disable_index(l3, cpu, slot, index);
+
+	return 0;
+}
+
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+				   const char *buf, size_t count,
+				   unsigned int slot)
+{
+	unsigned long val = 0;
+	int cpu, err = 0;
+
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;

-	if (!dev)
+	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
 		return -EINVAL;

-	if (strict_strtoul(buf, 10, &val) < 0)
-		return -EINVAL;
+	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

-	/* do not allow writes outside of allowed bits */
-	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
-	    ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
+	if (strict_strtoul(buf, 10, &val) < 0)
 		return -EINVAL;

-	amd_l3_disable_index(this_leaf->l3, cpu, slot, val);
-
+	err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
+	if (err) {
+		if (err == -EEXIST)
+			printk(KERN_WARNING "L3 disable slot %d in use!\n",
+			       slot);
+		return err;
+	}
 	return count;
 }

@@ -502,7 +558,7 @@ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,

 #else	/* CONFIG_CPU_SUP_AMD */
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
 {
 };
 #endif /* CONFIG_CPU_SUP_AMD */
@@ -518,7 +574,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,

 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
 		amd_cpuid4(index, &eax, &ebx, &ecx);
-		amd_check_l3_disable(index, this_leaf);
+		amd_check_l3_disable(this_leaf, index);
 	} else {
 		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
 	}
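With the slot accessors carved out, kernel code can reserve an L3 disable slot directly instead of writing through the sysfs files. A hedged sketch of the calling pattern (the l3 descriptor, cpu, and index value are assumed to come from surrounding driver context):

	/* Hypothetical: disable L3 index 0x20 through slot 0 if the slot is free. */
	if (amd_get_l3_disable_slot(l3, 0) < 0 &&
	    amd_set_l3_disable_slot(l3, cpu, 0, 0x20) == 0)
		pr_info("L3 index 0x20 disabled in slot 0\n");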
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
new file mode 100644
index 000000000000..34b4dad6f0b8
--- /dev/null
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -0,0 +1,63 @@
+/*
+ *	Routines to identify additional cpu features that are scattered in
+ *	cpuid space.
+ */
+#include <linux/cpu.h>
+
+#include <asm/pat.h>
+#include <asm/processor.h>
+
+#include <asm/apic.h>
+
+struct cpuid_bit {
+	u16 feature;
+	u8 reg;
+	u8 bit;
+	u32 level;
+	u32 sub_leaf;
+};
+
+enum cpuid_regs {
+	CR_EAX = 0,
+	CR_ECX,
+	CR_EDX,
+	CR_EBX
+};
+
+void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
+{
+	u32 max_level;
+	u32 regs[4];
+	const struct cpuid_bit *cb;
+
+	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+		{ X86_FEATURE_IDA,		CR_EAX, 1, 0x00000006, 0 },
+		{ X86_FEATURE_ARAT,		CR_EAX, 2, 0x00000006, 0 },
+		{ X86_FEATURE_PLN,		CR_EAX, 4, 0x00000006, 0 },
+		{ X86_FEATURE_PTS,		CR_EAX, 6, 0x00000006, 0 },
+		{ X86_FEATURE_APERFMPERF,	CR_ECX, 0, 0x00000006, 0 },
+		{ X86_FEATURE_EPB,		CR_ECX, 3, 0x00000006, 0 },
+		{ X86_FEATURE_XSAVEOPT,		CR_EAX, 0, 0x0000000d, 1 },
+		{ X86_FEATURE_CPB,		CR_EDX, 9, 0x80000007, 0 },
+		{ X86_FEATURE_NPT,		CR_EDX, 0, 0x8000000a, 0 },
+		{ X86_FEATURE_LBRV,		CR_EDX, 1, 0x8000000a, 0 },
+		{ X86_FEATURE_SVML,		CR_EDX, 2, 0x8000000a, 0 },
+		{ X86_FEATURE_NRIPS,		CR_EDX, 3, 0x8000000a, 0 },
+		{ 0, 0, 0, 0, 0 }
+	};
+
+	for (cb = cpuid_bits; cb->feature; cb++) {
+
+		/* Verify that the level is valid */
+		max_level = cpuid_eax(cb->level & 0xffff0000);
+		if (max_level < cb->level ||
+		    max_level > (cb->level | 0xffff))
+			continue;
+
+		cpuid_count(cb->level, cb->sub_leaf, &regs[CR_EAX],
+			    &regs[CR_EBX], &regs[CR_ECX], &regs[CR_EDX]);
+
+		if (regs[cb->reg] & (1 << cb->bit))
+			set_cpu_cap(c, cb->feature);
+	}
+}
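The level check in the loop is worth unpacking: masking with 0xffff0000 yields the base leaf of the range (0x00000000 for standard leaves, 0x80000000 for extended ones), and an entry is only probed when the reported maximum leaf actually covers cb->level. Worked through for the NPT entry:

	cb->level              = 0x8000000a
	cb->level & 0xffff0000 = 0x80000000	/* base of the extended range */
	probe only if 0x8000000a <= cpuid_eax(0x80000000) <= 0x8000ffff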
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/topology.c
index 10fa5684a662..4397e987a1cf 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -1,62 +1,14 @@
 /*
- *	Routines to identify additional cpu features that are scattered in
- *	cpuid space.
+ * Check for extended topology enumeration cpuid leaf 0xb and if it
+ * exists, use it for populating initial_apicid and cpu topology
+ * detection.
  */
-#include <linux/cpu.h>

+#include <linux/cpu.h>
+#include <asm/apic.h>
 #include <asm/pat.h>
 #include <asm/processor.h>

-#include <asm/apic.h>
-
-struct cpuid_bit {
-	u16 feature;
-	u8 reg;
-	u8 bit;
-	u32 level;
-};
-
-enum cpuid_regs {
-	CR_EAX = 0,
-	CR_ECX,
-	CR_EDX,
-	CR_EBX
-};
-
-void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
-{
-	u32 max_level;
-	u32 regs[4];
-	const struct cpuid_bit *cb;
-
-	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
-		{ X86_FEATURE_IDA,		CR_EAX, 1, 0x00000006 },
-		{ X86_FEATURE_ARAT,		CR_EAX, 2, 0x00000006 },
-		{ X86_FEATURE_APERFMPERF,	CR_ECX, 0, 0x00000006 },
-		{ X86_FEATURE_CPB,		CR_EDX, 9, 0x80000007 },
-		{ X86_FEATURE_NPT,		CR_EDX, 0, 0x8000000a },
-		{ X86_FEATURE_LBRV,		CR_EDX, 1, 0x8000000a },
-		{ X86_FEATURE_SVML,		CR_EDX, 2, 0x8000000a },
-		{ X86_FEATURE_NRIPS,		CR_EDX, 3, 0x8000000a },
-		{ 0, 0, 0, 0 }
-	};
-
-	for (cb = cpuid_bits; cb->feature; cb++) {
-
-		/* Verify that the level is valid */
-		max_level = cpuid_eax(cb->level & 0xffff0000);
-		if (max_level < cb->level ||
-		    max_level > (cb->level | 0xffff))
-			continue;
-
-		cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
-		      &regs[CR_ECX], &regs[CR_EDX]);
-
-		if (regs[cb->reg] & (1 << cb->bit))
-			set_cpu_cap(c, cb->feature);
-	}
-}
-
 /* leaf 0xb SMT level */
 #define SMT_LEVEL	0

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cbcf013a0ec6..d401f1d2d06e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -526,44 +526,10 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 	return (edx & MWAIT_EDX_C1);
 }

-/*
- * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
- * For more information see
- * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
- * - Erratum #365 for family 0x11 (not affected because C1e not in use)
- */
-static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
-{
-	u64 val;
-	if (c->x86_vendor != X86_VENDOR_AMD)
-		goto no_c1e_idle;
-
-	/* Family 0x0f models < rev F do not have C1E */
-	if (c->x86 == 0x0F && c->x86_model >= 0x40)
-		return 1;
-
-	if (c->x86 == 0x10) {
-		/*
-		 * check OSVW bit for CPUs that are not affected
-		 * by erratum #400
-		 */
-		if (cpu_has(c, X86_FEATURE_OSVW)) {
-			rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
-			if (val >= 2) {
-				rdmsrl(MSR_AMD64_OSVW_STATUS, val);
-				if (!(val & BIT(1)))
-					goto no_c1e_idle;
-			}
-		}
-		return 1;
-	}
-
-no_c1e_idle:
-	return 0;
-}
+bool c1e_detected;
+EXPORT_SYMBOL(c1e_detected);

 static cpumask_var_t c1e_mask;
-static int c1e_detected;

 void c1e_remove_cpu(int cpu)
 {
@@ -585,12 +551,12 @@ static void c1e_idle(void)
 		u32 lo, hi;

 		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
+
 		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
-			c1e_detected = 1;
+			c1e_detected = true;
 			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 				mark_tsc_unstable("TSC halt in AMD C1E");
 			printk(KERN_INFO "System has AMD C1E enabled\n");
-			set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
 		}
 	}

@@ -639,7 +605,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 		 */
 		printk(KERN_INFO "using mwait in idle threads.\n");
 		pm_idle = mwait_idle;
-	} else if (check_c1e_idle(c)) {
+	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
+		/* E400: APIC timer interrupt does not wake up CPU from C1e */
 		printk(KERN_INFO "using C1E aware idle routine\n");
 		pm_idle = c1e_idle;
 	} else
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 37e68fc5e24a..980149867a19 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -36,15 +36,14 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,

 	err = __copy_from_user(fx_sw_user, &buf->sw_reserved[0],
 			       sizeof(struct _fpx_sw_bytes));
-
 	if (err)
-		return err;
+		return -EFAULT;

 	/*
 	 * First Magic check failed.
 	 */
 	if (fx_sw_user->magic1 != FP_XSTATE_MAGIC1)
-		return -1;
+		return -EINVAL;

 	/*
 	 * Check for error scenarios.
@@ -52,19 +51,21 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
 	if (fx_sw_user->xstate_size < min_xstate_size ||
 	    fx_sw_user->xstate_size > xstate_size ||
 	    fx_sw_user->xstate_size > fx_sw_user->extended_size)
-		return -1;
+		return -EINVAL;

 	err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
 					    fx_sw_user->extended_size -
 					    FP_XSTATE_MAGIC2_SIZE));
+	if (err)
+		return err;
 	/*
 	 * Check for the presence of second magic word at the end of memory
 	 * layout. This detects the case where the user just copied the legacy
 	 * fpstate layout without copying the extended state information
 	 * in the memory layout.
 	 */
-	if (err || magic2 != FP_XSTATE_MAGIC2)
-		return -1;
+	if (magic2 != FP_XSTATE_MAGIC2)
+		return -EFAULT;

 	return 0;
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 56c9b6bd7655..5c81daf3ef57 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -384,8 +384,7 @@ static void svm_init_erratum_383(void)
 	int err;
 	u64 val;

-	/* Only Fam10h is affected */
-	if (boot_cpu_data.x86 != 0x10)
+	if (!cpu_has_amd_erratum(amd_erratum_383))
 		return;

 	/* Use _safe variants to not break nested virtualization */
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e9a8026d39f0..eead3f581fb5 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -164,7 +164,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
 		return;

-	if (boot_cpu_has(X86_FEATURE_AMDC1E))
+	if (c1e_detected)
 		type = ACPI_STATE_C1;

 	/*