Diffstat (limited to 'arch/x86/kernel/cpu/intel.c')
-rw-r--r--   arch/x86/kernel/cpu/intel.c   333
1 file changed, 333 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
new file mode 100644
index 000000000000..dc4e08147b1f
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel.c
@@ -0,0 +1,333 @@
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/uaccess.h>

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

extern int trap_init_f00f_bug(void);

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_INTEL)
                return;
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
}

/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
        /* Uses data from early_cpu_detect now */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask < 8) {
                printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                return 1;
        }
        return 0;
}


/*
 * P4 Xeon errata 037 workaround.
 * Hardware prefetcher may cause stale data to be loaded into the cache.
 */
static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
        unsigned long lo, hi;

        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
                rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
                if ((lo & (1<<9)) == 0) {
                        printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
                        printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
                        lo |= (1<<9);   /* Disable hw prefetching */
                        wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
                }
        }
}


/*
 * find out the number of processor cores on the die
 */
static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx;

        if (c->cpuid_level < 4)
                return 1;

        /* Intel has a non-standard dependency on %ecx for this CPUID level. */
        cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        unsigned int l2 = 0;
        char *p = NULL;

#ifdef CONFIG_X86_F00F_BUG
        /*
         * All current models of Pentium and Pentium with MMX technology CPUs
         * have the F0 0F bug, which lets nonprivileged users lock up the system.
         * Note that the workaround only should be initialized once...
         */
        c->f00f_bug = 0;
        if (!paravirt_enabled() && c->x86 == 5) {
                static int f00f_workaround_enabled = 0;

                c->f00f_bug = 1;
                if ( !f00f_workaround_enabled ) {
                        trap_init_f00f_bug();
                        printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
                        f00f_workaround_enabled = 1;
                }
        }
#endif

        select_idle_routine(c);
        l2 = init_intel_cacheinfo(c);
        if (c->cpuid_level > 9 ) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
        }

        /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
                clear_bit(X86_FEATURE_SEP, c->x86_capability);

        /* Names for the Pentium II/Celeron processors
           detectable only by also checking the cache size.
           Dixon is NOT a Celeron. */
        if (c->x86 == 6) {
                switch (c->x86_model) {
                case 5:
                        if (c->x86_mask == 0) {
                                if (l2 == 0)
                                        p = "Celeron (Covington)";
                                else if (l2 == 256)
                                        p = "Mobile Pentium II (Dixon)";
                        }
                        break;

                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
                        else if (c->x86_mask == 0 || c->x86_mask == 5)
                                p = "Celeron-A";
                        break;

                case 8:
                        if (l2 == 128)
                                p = "Celeron (Coppermine)";
                        break;
                }
        }

        if ( p )
                strcpy(c->x86_model_id, p);

        c->x86_max_cores = num_cpu_cores(c);

        detect_ht(c);

        /* Work around errata */
        Intel_errata_workarounds(c);

#ifdef CONFIG_X86_INTEL_USERCOPY
        /*
         * Set up the preferred alignment for movsl bulk memory moves
         */
        switch (c->x86) {
        case 4:         /* 486: untested */
                break;
        case 5:         /* Old Pentia: untested */
                break;
        case 6:         /* PII/PIII only like movsl with 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        case 15:        /* P4 is OK down to 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        }
#endif

        if (c->x86 == 15) {
                set_bit(X86_FEATURE_P4, c->x86_capability);
                set_bit(X86_FEATURE_SYNC_RDTSC, c->x86_capability);
        }
        if (c->x86 == 6)
                set_bit(X86_FEATURE_P3, c->x86_capability);
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);

        if (cpu_has_ds) {
                unsigned int l1;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_bit(X86_FEATURE_BTS, c->x86_capability);
                if (!(l1 & (1<<12)))
                        set_bit(X86_FEATURE_PEBS, c->x86_capability);
        }
}

static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
{
        /* Intel PIII Tualatin. This comes in two flavours.
         * One has 256kb of cache, the other 512. We have no way
         * to determine which, so we use a boottime override
         * for the 512kb model, and assume 256 otherwise.
         */
        if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
                size = 256;
        return size;
}

static struct cpu_dev intel_cpu_dev __cpuinitdata = {
        .c_vendor       = "Intel",
        .c_ident        = { "GenuineIntel" },
        .c_models = {
                { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
                  {
                          [0] = "486 DX-25/33",
                          [1] = "486 DX-50",
                          [2] = "486 SX",
                          [3] = "486 DX/2",
                          [4] = "486 SL",
                          [5] = "486 SX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB"
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
                  {
                          [0] = "Pentium 60/66 A-step",
                          [1] = "Pentium 60/66",
                          [2] = "Pentium 75 - 200",
                          [3] = "OverDrive PODP5V83",
                          [4] = "Pentium MMX",
                          [7] = "Mobile Pentium 75 - 200",
                          [8] = "Mobile Pentium MMX"
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
                  {
                          [0] = "Pentium Pro A-step",
                          [1] = "Pentium Pro",
                          [3] = "Pentium II (Klamath)",
                          [4] = "Pentium II (Deschutes)",
                          [5] = "Pentium II (Deschutes)",
                          [6] = "Mobile Pentium II",
                          [7] = "Pentium III (Katmai)",
                          [8] = "Pentium III (Coppermine)",
                          [10] = "Pentium III (Cascades)",
                          [11] = "Pentium III (Tualatin)",
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
                  {
                          [0] = "Pentium 4 (Unknown)",
                          [1] = "Pentium 4 (Willamette)",
                          [2] = "Pentium 4 (Northwood)",
                          [4] = "Pentium 4 (Foster)",
                          [5] = "Pentium 4 (Foster)",
                  }
                },
        },
        .c_init         = init_intel,
        .c_size_cache   = intel_size_cache,
};

__init int intel_cpu_init(void)
{
        cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
        return 0;
}

#ifndef CONFIG_X86_CMPXCHG
unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
{
        u8 prev;
        unsigned long flags;

        /* Poor man's cmpxchg for 386. Unsuitable for SMP */
        local_irq_save(flags);
        prev = *(u8 *)ptr;
        if (prev == old)
                *(u8 *)ptr = new;
        local_irq_restore(flags);
        return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u8);

unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
{
        u16 prev;
        unsigned long flags;

        /* Poor man's cmpxchg for 386. Unsuitable for SMP */
        local_irq_save(flags);
        prev = *(u16 *)ptr;
        if (prev == old)
                *(u16 *)ptr = new;
        local_irq_restore(flags);
        return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u16);

unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
        u32 prev;
        unsigned long flags;

        /* Poor man's cmpxchg for 386. Unsuitable for SMP */
        local_irq_save(flags);
        prev = *(u32 *)ptr;
        if (prev == old)
                *(u32 *)ptr = new;
        local_irq_restore(flags);
        return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u32);
#endif

// arch_initcall(intel_cpu_init);
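num_cpu_cores() above queries CPUID leaf 4 with ECX = 0 (hence cpuid_count() rather than a plain cpuid()) and, when the leaf is populated (low 5 bits of EAX non-zero), takes bits 31:26 of EAX plus one as the number of cores per package. A rough user-space sketch of the same probe, assuming GCC/Clang's <cpuid.h> helper __get_cpuid_count(); the explicit 0x3f mask is added here for clarity and is not in the kernel code above:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 4, subleaf 0: EAX[4:0] is the cache type (0 means the leaf is
         * empty), EAX[31:26] is (cores per physical package - 1) on Intel. */
        if (!__get_cpuid_count(4, 0, &eax, &ebx, &ecx, &edx) || !(eax & 0x1f)) {
                printf("CPUID leaf 4 not usable, assuming 1 core\n");
                return 0;
        }
        printf("cores per package: %u\n", ((eax >> 26) & 0x3f) + 1);
        return 0;
}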
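The SEP quirk in init_intel() above packs family, model and stepping into one value and compares it against 0x633, i.e. family 6, model 3, stepping 3, the point from which, per the comment, SYSENTER is actually present. A minimal standalone sketch of that arithmetic; the sep_usable() helper and the sample steppings are illustrative only, not part of the patch:

#include <stdio.h>

/* Re-statement of the kernel's test: (family << 8 | model << 4 | stepping)
 * below 0x633 means SEP is advertised but not usable, so the feature bit
 * is cleared.  0x633 packs family 6, model 3, stepping 3. */
static int sep_usable(unsigned int family, unsigned int model, unsigned int stepping)
{
        return (family << 8 | model << 4 | stepping) >= 0x633;
}

int main(void)
{
        /* Pentium Pro, 6/1/9 packs to 0x619 < 0x633: the kernel clears SEP */
        printf("6/1/9: %s\n", sep_usable(6, 1, 9) ? "keep SEP" : "clear SEP");
        /* 6/3/3 packs to exactly 0x633: SEP is kept */
        printf("6/3/3: %s\n", sep_usable(6, 3, 3) ? "keep SEP" : "clear SEP");
        return 0;
}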