Diffstat (limited to 'arch/x86/boot/cpucheck.c')
 -rw-r--r--  arch/x86/boot/cpucheck.c | 268
 1 file changed, 268 insertions(+), 0 deletions(-)
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
new file mode 100644
index 000000000000..e655a89c5510
--- /dev/null
+++ b/arch/x86/boot/cpucheck.c
@@ -0,0 +1,268 @@
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * arch/x86/boot/cpucheck.c
 *
 * Check for obligatory CPU features and abort if the features are not
 * present. This code should be compilable as 16-, 32- or 64-bit
 * code, so be very careful with types and inline assembly.
 *
 * This code should not contain any messages; that requires an
 * additional wrapper.
 *
 * As written, this code is not safe for inclusion into the kernel
 * proper (after FPU initialization, in particular).
 */

#ifdef _SETUP
# include "boot.h"
# include "bitops.h"
#endif
#include <linux/types.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>

struct cpu_features {
	int level;		/* Family, or 64 for x86-64 */
	int model;
	u32 flags[NCAPINTS];
};

static struct cpu_features cpu;
static u32 cpu_vendor[3];
static u32 err_flags[NCAPINTS];

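/*
 * Minimum CPU family the kernel was configured for; 64-bit builds are
 * treated as "family" 64 so the level check below also rejects CPUs
 * without long mode.
 */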
#ifdef CONFIG_X86_64
static const int req_level = 64;
#elif defined(CONFIG_X86_MINIMUM_CPU_FAMILY)
static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;
#else
static const int req_level = 3;
#endif

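/*
 * Feature words the configured kernel cannot run without, in the same
 * word layout as cpu.flags; the masks come from <asm/required-features.h>.
 */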
static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	REQUIRED_MASK2,
	REQUIRED_MASK3,
	REQUIRED_MASK4,
	REQUIRED_MASK5,
	REQUIRED_MASK6,
	REQUIRED_MASK7,
};

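/*
 * Pack four ASCII characters into a little-endian 32-bit word, matching
 * the layout CPUID leaf 0 uses to return the vendor string in EBX/EDX/ECX.
 */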
#define A32(a,b,c,d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))

static int is_amd(void)
{
	return cpu_vendor[0] == A32('A','u','t','h') &&
	       cpu_vendor[1] == A32('e','n','t','i') &&
	       cpu_vendor[2] == A32('c','A','M','D');
}

static int is_centaur(void)
{
	return cpu_vendor[0] == A32('C','e','n','t') &&
	       cpu_vendor[1] == A32('a','u','r','H') &&
	       cpu_vendor[2] == A32('a','u','l','s');
}

static int is_transmeta(void)
{
	return cpu_vendor[0] == A32('G','e','n','u') &&
	       cpu_vendor[1] == A32('i','n','e','T') &&
	       cpu_vendor[2] == A32('M','x','8','6');
}

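/*
 * Probe for an x87 FPU: clear CR0.EM/CR0.TS if set, then execute
 * fninit/fnstsw/fnstcw.  A working FPU reports a zero status word and
 * control word low bits of 0x3f.  CR0 is not restored afterwards, which
 * is only acceptable in this pre-kernel environment (see the header
 * comment above).
 */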
static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	u32 cr0;

	asm("movl %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("movl %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}

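/*
 * Return nonzero if the given EFLAGS bit can be toggled.  This is how
 * CPUID support is detected (the ID bit) and how a 486 is told apart
 * from a 386 (the AC bit).
 */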
static int has_eflag(u32 mask)
{
	u32 f0, f1;

	asm("pushfl ; "
	    "pushfl ; "
	    "popl %0 ; "
	    "movl %0,%1 ; "
	    "xorl %2,%1 ; "
	    "pushl %1 ; "
	    "popfl ; "
	    "pushfl ; "
	    "popl %1 ; "
	    "popfl"
	    : "=&r" (f0), "=&r" (f1)
	    : "ri" (mask));

	return !!((f0^f1) & mask);
}

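/*
 * Fill in cpu_vendor and cpu.flags: CPUID leaf 0 gives the vendor string
 * and the maximum standard leaf, leaf 1 gives family/model plus feature
 * words 0 (EDX) and 4 (ECX), and extended leaf 0x80000001 gives feature
 * words 1 (EDX) and 6 (ECX).
 */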
static void get_flags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		asm("cpuid"
		    : "=a" (max_intel_level),
		      "=b" (cpu_vendor[0]),
		      "=d" (cpu_vendor[1]),
		      "=c" (cpu_vendor[2])
		    : "a" (0));

		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			asm("cpuid"
			    : "=a" (tfms),
			      "=c" (cpu.flags[4]),
			      "=d" (cpu.flags[0])
			    : "a" (0x00000001)
			    : "ebx");
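			/*
			 * EAX of leaf 1 is type/family/model/stepping:
			 * family in bits 11:8, model in bits 7:4, with the
			 * extended model (bits 19:16) folded in for
			 * family 6 and above.
			 */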
			cpu.level = (tfms >> 8) & 15;
			cpu.model = (tfms >> 4) & 15;
			if (cpu.level >= 6)
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

		asm("cpuid"
		    : "=a" (max_amd_level)
		    : "a" (0x80000000)
		    : "ebx", "ecx", "edx");

		if (max_amd_level >= 0x80000001 &&
		    max_amd_level <= 0x8000ffff) {
			u32 eax = 0x80000001;
			asm("cpuid"
			    : "+a" (eax),
			      "=c" (cpu.flags[6]),
			      "=d" (cpu.flags[1])
			    : : "ebx");
		}
	}
}

/* Returns a bitmask of which words we have error bits in */
static int check_flags(void)
{
	u32 err;
	int i;

	err = 0;
	for (i = 0; i < NCAPINTS; i++) {
		err_flags[i] = req_flags[i] & ~cpu.flags[i];
		if (err_flags[i])
			err |= 1 << i;
	}

	return err;
}

/*
 * Returns -1 on error.
 *
 * *cpu_level is set to the current CPU level; *req_level to the required
 * level. x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags missing.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof cpu.flags);
	cpu.level = 3;

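	/* The AC flag exists only on the 486 and later, so if it can be
	   toggled this is at least a 486 */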
	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_flags();
	err = check_flags();

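	/* Long mode support means this is an x86-64 CPU */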
	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */

		u32 ecx = MSR_K7_HWCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
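		/* HWCR bit 15 masks SSE reporting on K7-class CPUs;
		   clearing it should make SSE/SSE2 show up in CPUID again */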
		eax &= ~(1 << 15);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		get_flags();	/* Make sure it really did something */
		err = check_flags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */

		u32 ecx = MSR_VIA_FCR;
		u32 eax, edx;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
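		/* FCR bit 1 enables CX8 reporting (bit 7 is set as well);
		   the feature bit is then set by hand below rather than
		   re-read via CPUID */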
		eax |= (1<<1)|(1<<7);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_flags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */

		u32 ecx = 0x80860004;
		u32 eax, edx;
		u32 level = 1;

		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
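		/* Write all ones to the Transmeta feature-mask MSR to
		   unmask every bit, re-read the CPUID level 1 feature word,
		   then restore the original mask */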
		asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));

		err = check_flags();
	}

	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}