-rw-r--r--   arch/x86/include/asm/cpu.h             |  3
-rw-r--r--   arch/x86/include/asm/microcode.h       | 39
-rw-r--r--   arch/x86/include/asm/msr-index.h       |  1
-rw-r--r--   arch/x86/include/asm/msr.h             |  2
-rw-r--r--   arch/x86/kernel/cpu/amd.c              |  4
-rw-r--r--   arch/x86/kernel/cpu/common.c           | 13
-rw-r--r--   arch/x86/kernel/cpu/microcode/core.c   | 12
-rw-r--r--   arch/x86/kernel/cpu/microcode/intel.c  | 16
-rw-r--r--   arch/x86/kernel/cpu/rdrand.c           | 25
-rw-r--r--   arch/x86/kvm/cpuid.h                   | 34
-rw-r--r--   arch/x86/kvm/svm.c                     | 17
-rw-r--r--   arch/x86/lib/Makefile                  |  2
-rw-r--r--   arch/x86/lib/cpu.c                     | 35
13 files changed, 129 insertions, 74 deletions
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index bf2caa1dedc5..678637ad7476 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -36,4 +36,7 @@ extern int _debug_hotplug_cpu(int cpu, int action);
 
 int mwait_usable(const struct cpuinfo_x86 *);
 
+unsigned int x86_family(unsigned int sig);
+unsigned int x86_model(unsigned int sig);
+unsigned int x86_stepping(unsigned int sig);
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 34e62b1dcfce..1e1b07a5a738 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_MICROCODE_H
 #define _ASM_X86_MICROCODE_H
 
+#include <asm/cpu.h>
 #include <linux/earlycpio.h>
 
 #define native_rdmsr(msr, val1, val2) \
@@ -95,14 +96,14 @@ static inline void __exit exit_amd_microcode(void) {}
 
 /*
  * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
- * x86_vendor() gets vendor id for BSP.
+ * x86_cpuid_vendor() gets vendor id for BSP.
  *
  * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
- * coding, we still use x86_vendor() to get vendor id for AP.
+ * coding, we still use x86_cpuid_vendor() to get vendor id for AP.
  *
- * x86_vendor() gets vendor information directly from CPUID.
+ * x86_cpuid_vendor() gets vendor information directly from CPUID.
  */
-static inline int x86_vendor(void)
+static inline int x86_cpuid_vendor(void)
 {
         u32 eax = 0x00000000;
         u32 ebx, ecx = 0, edx;
@@ -118,40 +119,14 @@ static inline int x86_vendor(void)
         return X86_VENDOR_UNKNOWN;
 }
 
-static inline unsigned int __x86_family(unsigned int sig)
-{
-        unsigned int x86;
-
-        x86 = (sig >> 8) & 0xf;
-
-        if (x86 == 0xf)
-                x86 += (sig >> 20) & 0xff;
-
-        return x86;
-}
-
-static inline unsigned int x86_family(void)
+static inline unsigned int x86_cpuid_family(void)
 {
         u32 eax = 0x00000001;
         u32 ebx, ecx = 0, edx;
 
         native_cpuid(&eax, &ebx, &ecx, &edx);
 
-        return __x86_family(eax);
-}
-
-static inline unsigned int x86_model(unsigned int sig)
-{
-        unsigned int x86, model;
-
-        x86 = __x86_family(sig);
-
-        model = (sig >> 4) & 0xf;
-
-        if (x86 == 0x6 || x86 == 0xf)
-                model += ((sig >> 16) & 0xf) << 4;
-
-        return model;
+        return x86_family(eax);
 }
 
 #ifdef CONFIG_MICROCODE
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 690b4027e17c..b05402ef3b84 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -321,6 +321,7 @@
 #define MSR_F15H_PERF_CTR		0xc0010201
 #define MSR_F15H_NB_PERF_CTL		0xc0010240
 #define MSR_F15H_NB_PERF_CTR		0xc0010241
+#define MSR_F15H_IC_CFG			0xc0011021
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index fedd6e6d1e43..1e87bff093d1 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -221,7 +221,7 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
 
 static inline void wrmsrl(unsigned msr, u64 val)
 {
-        native_write_msr(msr, (u32)val, (u32)(val >> 32));
+        native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
 }
 
 /* wrmsr with exception handling */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 34c3ad608dd4..e678ddeed030 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -678,9 +678,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
          * Disable it on the affected CPUs.
          */
         if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
-                if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
+                if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
                         value |= 0x1E;
-                        wrmsrl_safe(0xc0011021, value);
+                        wrmsrl_safe(MSR_F15H_IC_CFG, value);
                 }
         }
 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4d5279c95d5f..37830de8f60a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -581,14 +581,9 @@ void cpu_detect(struct cpuinfo_x86 *c)
                 u32 junk, tfms, cap0, misc;
 
                 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-                c->x86 = (tfms >> 8) & 0xf;
-                c->x86_model = (tfms >> 4) & 0xf;
-                c->x86_mask = tfms & 0xf;
-
-                if (c->x86 == 0xf)
-                        c->x86 += (tfms >> 20) & 0xff;
-                if (c->x86 >= 0x6)
-                        c->x86_model += ((tfms >> 16) & 0xf) << 4;
+                c->x86 = x86_family(tfms);
+                c->x86_model = x86_model(tfms);
+                c->x86_mask = x86_stepping(tfms);
 
                 if (cap0 & (1<<19)) {
                         c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
@@ -1187,7 +1182,7 @@ void syscall_init(void)
          * They both write to the same internal register. STAR allows to
          * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
          */
-        wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
+        wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
         wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
 
 #ifdef CONFIG_IA32_EMULATION
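The MSR_STAR rewrite above is behavior-preserving: shifting the selectors by 48 and 32 within a u64 is the same as shifting them by 16 and 0 within the high 32-bit half, with a zero low half. A minimal stand-alone sketch of that arithmetic follows; the selector values 0x23 and 0x10 are illustrative assumptions here, not values taken from this diff.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Illustrative selectors; the real ones come from asm/segment.h. */
            uint64_t user32_cs = 0x23, kernel_cs = 0x10;

            /* Old form: one 64-bit value passed to wrmsrl(). */
            uint64_t val = (user32_cs << 48) | (kernel_cs << 32);

            /* New form: low/high 32-bit halves passed to wrmsr(). */
            uint32_t lo = 0;
            uint32_t hi = (uint32_t)((user32_cs << 16) | kernel_cs);

            assert((uint32_t)(val & 0xffffffffULL) == lo);
            assert((uint32_t)(val >> 32) == hi);
            printf("MSR_STAR 0x%016llx == lo 0x%08x, hi 0x%08x\n",
                   (unsigned long long)val, lo, hi);
            return 0;
    }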
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index b3e94ef461fd..faec7120c508 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -129,8 +129,8 @@ void __init load_ucode_bsp(void)
         if (!have_cpuid_p())
                 return;
 
-        vendor = x86_vendor();
-        family = x86_family();
+        vendor = x86_cpuid_vendor();
+        family = x86_cpuid_family();
 
         switch (vendor) {
         case X86_VENDOR_INTEL:
@@ -165,8 +165,8 @@ void load_ucode_ap(void)
         if (!have_cpuid_p())
                 return;
 
-        vendor = x86_vendor();
-        family = x86_family();
+        vendor = x86_cpuid_vendor();
+        family = x86_cpuid_family();
 
         switch (vendor) {
         case X86_VENDOR_INTEL:
@@ -206,8 +206,8 @@ void reload_early_microcode(void)
 {
         int vendor, family;
 
-        vendor = x86_vendor();
-        family = x86_family();
+        vendor = x86_cpuid_vendor();
+        family = x86_cpuid_family();
 
         switch (vendor) {
         case X86_VENDOR_INTEL:
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index ce47402eb2f9..ee81c544ee0d 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -145,10 +145,10 @@ matching_model_microcode(struct microcode_header_intel *mc_header,
         int ext_sigcount, i;
         struct extended_signature *ext_sig;
 
-        fam = __x86_family(sig);
+        fam = x86_family(sig);
         model = x86_model(sig);
 
-        fam_ucode = __x86_family(mc_header->sig);
+        fam_ucode = x86_family(mc_header->sig);
         model_ucode = x86_model(mc_header->sig);
 
         if (fam == fam_ucode && model == model_ucode)
@@ -163,7 +163,7 @@ matching_model_microcode(struct microcode_header_intel *mc_header,
         ext_sigcount = ext_header->count;
 
         for (i = 0; i < ext_sigcount; i++) {
-                fam_ucode = __x86_family(ext_sig->sig);
+                fam_ucode = x86_family(ext_sig->sig);
                 model_ucode = x86_model(ext_sig->sig);
 
                 if (fam == fam_ucode && model == model_ucode)
@@ -365,7 +365,7 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
         native_cpuid(&eax, &ebx, &ecx, &edx);
         csig.sig = eax;
 
-        family = __x86_family(csig.sig);
+        family = x86_family(csig.sig);
         model = x86_model(csig.sig);
 
         if ((model >= 5) || (family > 6)) {
@@ -521,16 +521,12 @@ static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
 {
 #ifdef CONFIG_X86_64
         unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
-        unsigned int family, model, stepping;
         char name[30];
 
         native_cpuid(&eax, &ebx, &ecx, &edx);
 
-        family = __x86_family(eax);
-        model = x86_model(eax);
-        stepping = eax & 0xf;
-
-        sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);
+        sprintf(name, "intel-ucode/%02x-%02x-%02x",
+                x86_family(eax), x86_model(eax), x86_stepping(eax));
 
         return get_builtin_firmware(cp, name);
 #else
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index 136ac74dee82..819d94982e07 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -33,28 +33,27 @@ static int __init x86_rdrand_setup(char *s)
 __setup("nordrand", x86_rdrand_setup);
 
 /*
- * Force a reseed cycle; we are architecturally guaranteed a reseed
- * after no more than 512 128-bit chunks of random data. This also
- * acts as a test of the CPU capability.
+ * RDRAND has Built-In-Self-Test (BIST) that runs on every invocation.
+ * Run the instruction a few times as a sanity check.
+ * If it fails, it is simple to disable RDRAND here.
  */
-#define RESEED_LOOP ((512*128)/sizeof(unsigned long))
+#define SANITY_CHECK_LOOPS 8
 
 void x86_init_rdrand(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_ARCH_RANDOM
         unsigned long tmp;
-        int i, count, ok;
+        int i;
 
         if (!cpu_has(c, X86_FEATURE_RDRAND))
-                return; /* Nothing to do */
+                return;
 
-        for (count = i = 0; i < RESEED_LOOP; i++) {
-                ok = rdrand_long(&tmp);
-                if (ok)
-                        count++;
+        for (i = 0; i < SANITY_CHECK_LOOPS; i++) {
+                if (!rdrand_long(&tmp)) {
+                        clear_cpu_cap(c, X86_FEATURE_RDRAND);
+                        printk_once(KERN_WARNING "rdrand: disabled\n");
+                        return;
+                }
         }
-
-        if (count != RESEED_LOOP)
-                clear_cpu_cap(c, X86_FEATURE_RDRAND);
 #endif
 }
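The new rdrand.c loop bails out and clears X86_FEATURE_RDRAND on the first failed rdrand_long(). The same check-and-bail pattern can be sketched in user space with the compiler's RDRAND intrinsic; this is only an illustration under the assumption of a GCC/Clang toolchain built with -mrdrnd and an RDRAND-capable CPU, not kernel code (the CPUID feature check that the kernel does via cpu_has() is omitted here).

    #include <immintrin.h>
    #include <stdio.h>

    #define SANITY_CHECK_LOOPS 8

    int main(void)
    {
            unsigned long long tmp;
            int i;

            for (i = 0; i < SANITY_CHECK_LOOPS; i++) {
                    /* _rdrand64_step() returns 0 when the instruction reports failure. */
                    if (!_rdrand64_step(&tmp)) {
                            fprintf(stderr, "rdrand: disabled (sanity check failed)\n");
                            return 1;
                    }
            }
            printf("rdrand: %d successful samples, last = %llx\n", i, tmp);
            return 0;
    }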
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 3f5c48ddba45..c8eda1498121 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -2,6 +2,7 @@
 #define ARCH_X86_KVM_CPUID_H
 
 #include "x86.h"
+#include <asm/cpu.h>
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
@@ -178,4 +179,37 @@ static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
 }
 #undef BIT_NRIPS
 
+static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
+{
+        struct kvm_cpuid_entry2 *best;
+
+        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+        if (!best)
+                return -1;
+
+        return x86_family(best->eax);
+}
+
+static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
+{
+        struct kvm_cpuid_entry2 *best;
+
+        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+        if (!best)
+                return -1;
+
+        return x86_model(best->eax);
+}
+
+static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
+{
+        struct kvm_cpuid_entry2 *best;
+
+        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+        if (!best)
+                return -1;
+
+        return x86_stepping(best->eax);
+}
+
 #endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 899c40f826dd..22aef20bf9c7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3053,6 +3053,23 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         case MSR_IA32_UCODE_REV:
                 msr_info->data = 0x01000065;
                 break;
+        case MSR_F15H_IC_CFG: {
+
+                int family, model;
+
+                family = guest_cpuid_family(vcpu);
+                model = guest_cpuid_model(vcpu);
+
+                if (family < 0 || model < 0)
+                        return kvm_get_msr_common(vcpu, msr_info);
+
+                msr_info->data = 0;
+
+                if (family == 0x15 &&
+                    (model >= 0x2 && model < 0x20))
+                        msr_info->data = 0x1E;
+                }
+                break;
         default:
                 return kvm_get_msr_common(vcpu, msr_info);
         }
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index f2587888d987..a501fa25da41 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -16,7 +16,7 @@ clean-files := inat-tables.c
 
 obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
 
-lib-y := delay.o misc.o cmdline.o
+lib-y := delay.o misc.o cmdline.o cpu.o
 lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
new file mode 100644
index 000000000000..aa417a97511c
--- /dev/null
+++ b/arch/x86/lib/cpu.c
@@ -0,0 +1,35 @@
+#include <linux/module.h>
+
+unsigned int x86_family(unsigned int sig)
+{
+        unsigned int x86;
+
+        x86 = (sig >> 8) & 0xf;
+
+        if (x86 == 0xf)
+                x86 += (sig >> 20) & 0xff;
+
+        return x86;
+}
+EXPORT_SYMBOL_GPL(x86_family);
+
+unsigned int x86_model(unsigned int sig)
+{
+        unsigned int fam, model;
+
+        fam = x86_family(sig);
+
+        model = (sig >> 4) & 0xf;
+
+        if (fam >= 0x6)
+                model += ((sig >> 16) & 0xf) << 4;
+
+        return model;
+}
+EXPORT_SYMBOL_GPL(x86_model);
+
+unsigned int x86_stepping(unsigned int sig)
+{
+        return sig & 0xf;
+}
+EXPORT_SYMBOL_GPL(x86_stepping);
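The new helpers decode the CPUID leaf 1 signature layout: stepping in bits 3:0, model in bits 7:4, family in bits 11:8, with the extended model bits (19:16) folded in for family 0x6 and above and the extended family bits (27:20) added for family 0xf. A small worked example follows, compiled stand-alone with the same logic; the signature value 0x000306c3 is illustrative, not taken from this diff.

    #include <assert.h>
    #include <stdio.h>

    /* Same bit layout as arch/x86/lib/cpu.c, duplicated for a user-space test. */
    static unsigned int x86_family(unsigned int sig)
    {
            unsigned int x86 = (sig >> 8) & 0xf;

            if (x86 == 0xf)
                    x86 += (sig >> 20) & 0xff;
            return x86;
    }

    static unsigned int x86_model(unsigned int sig)
    {
            unsigned int fam = x86_family(sig);
            unsigned int model = (sig >> 4) & 0xf;

            if (fam >= 0x6)
                    model += ((sig >> 16) & 0xf) << 4;
            return model;
    }

    static unsigned int x86_stepping(unsigned int sig)
    {
            return sig & 0xf;
    }

    int main(void)
    {
            unsigned int sig = 0x000306c3;  /* illustrative CPUID(1).EAX value */

            /* 0x000306c3 -> family 0x6, model 0x3c, stepping 0x3 */
            assert(x86_family(sig) == 0x6);
            assert(x86_model(sig) == 0x3c);
            assert(x86_stepping(sig) == 0x3);

            /* Same fields that intel.c formats into "intel-ucode/%02x-%02x-%02x". */
            printf("intel-ucode/%02x-%02x-%02x\n",
                   x86_family(sig), x86_model(sig), x86_stepping(sig));
            return 0;
    }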