Diffstat (limited to 'arch/x86/kernel/cpu')
 arch/x86/kernel/cpu/Makefile           |   4
 arch/x86/kernel/cpu/amd.c              | 121
 arch/x86/kernel/cpu/centaur.c          | 490
 arch/x86/kernel/cpu/common.c           | 180
 arch/x86/kernel/cpu/cpu.h              |  26
 arch/x86/kernel/cpu/cyrix.c            | 136
 arch/x86/kernel/cpu/feature_names.c    |   2
 arch/x86/kernel/cpu/intel.c            | 106
 arch/x86/kernel/cpu/mcheck/mce_32.c    |  50
 arch/x86/kernel/cpu/mcheck/non-fatal.c |  21
 arch/x86/kernel/cpu/mcheck/p5.c        |  16
 arch/x86/kernel/cpu/mcheck/p6.c        |  48
 arch/x86/kernel/cpu/mcheck/winchip.c   |   8
 arch/x86/kernel/cpu/mtrr/generic.c     | 139
 arch/x86/kernel/cpu/mtrr/if.c          |   7
 arch/x86/kernel/cpu/mtrr/main.c        |   2
 arch/x86/kernel/cpu/mtrr/state.c       |  14
 arch/x86/kernel/cpu/nexgen.c           |  15
 arch/x86/kernel/cpu/proc.c             | 170
 arch/x86/kernel/cpu/transmeta.c        |  30
 arch/x86/kernel/cpu/umc.c              |  19
21 files changed, 908 insertions(+), 696 deletions(-)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index a0c4d7c5dbd7..ee7c45235e54 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -3,9 +3,9 @@
 #
 
 obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
-obj-y			+= feature_names.o
+obj-y			+= proc.o feature_names.o
 
-obj-$(CONFIG_X86_32)	+= common.o proc.o bugs.o
+obj-$(CONFIG_X86_32)	+= common.o bugs.o
 obj-$(CONFIG_X86_32)	+= amd.o
 obj-$(CONFIG_X86_32)	+= cyrix.o
 obj-$(CONFIG_X86_32)	+= centaur.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 693e353999cd..0173065dc3b7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -4,8 +4,8 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
-#include <asm/mach_apic.h>
 
+#include <mach_apic.h>
 #include "cpu.h"
 
 /*
@@ -20,7 +20,7 @@
  *	the chip setting when fixing the bug but they also tweaked some
  *	performance at the same time..
  */
- 
+
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
@@ -63,12 +63,12 @@ static __cpuinit int amd_apic_timer_broken(void)
 
 int force_mwait __cpuinitdata;
 
-void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpuid_eax(0x80000000) >= 0x80000007) {
 		c->x86_power = cpuid_edx(0x80000007);
 		if (c->x86_power & (1<<8))
-			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
 }
 
@@ -81,7 +81,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
 	unsigned long long value;
 
-	/* Disable TLB flush filter by setting HWCR.FFDIS on K8
+	/*
+	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 	 * bit 6 of msr C001_0015
 	 *
 	 * Errata 63 for SH-B3 steppings
@@ -102,15 +103,16 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 *	no bus pipeline)
 	 */
 
-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, c->x86_capability);
+	/*
+	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 */
+	clear_cpu_cap(c, 0*32+31);
 
 	r = get_model_name(c);
 
-	switch(c->x86)
-	{
-		case 4:
+	switch (c->x86) {
+	case 4:
 		/*
 		 * General Systems BIOSen alias the cpu frequency registers
 		 * of the Elan at 0x000df000. Unfortuantly, one of the Linux
@@ -120,61 +122,60 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
 #define CBAR_ENB	(0x80000000)
 #define CBAR_KEY	(0X000000CB)
-		if (c->x86_model==9 || c->x86_model == 10) {
+		if (c->x86_model == 9 || c->x86_model == 10) {
 			if (inl (CBAR) & CBAR_ENB)
 				outl (0 | CBAR_KEY, CBAR);
 		}
 		break;
 	case 5:
-		if( c->x86_model < 6 )
-		{
+		if (c->x86_model < 6) {
 			/* Based on AMD doc 20734R - June 2000 */
-			if ( c->x86_model == 0 ) {
-				clear_bit(X86_FEATURE_APIC, c->x86_capability);
-				set_bit(X86_FEATURE_PGE, c->x86_capability);
+			if (c->x86_model == 0) {
+				clear_cpu_cap(c, X86_FEATURE_APIC);
+				set_cpu_cap(c, X86_FEATURE_PGE);
 			}
 			break;
 		}
 
-		if ( c->x86_model == 6 && c->x86_mask == 1 ) {
+		if (c->x86_model == 6 && c->x86_mask == 1) {
 			const int K6_BUG_LOOP = 1000000;
 			int n;
 			void (*f_vide)(void);
 			unsigned long d, d2;
 
 			printk(KERN_INFO "AMD K6 stepping B detected - ");
 
 			/*
 			 * It looks like AMD fixed the 2.6.2 bug and improved indirect
 			 * calls at the same time.
 			 */
 
 			n = K6_BUG_LOOP;
 			f_vide = vide;
 			rdtscl(d);
 			while (n--)
 				f_vide();
 			rdtscl(d2);
 			d = d2-d;
 
 			if (d > 20*K6_BUG_LOOP)
 				printk("system stability may be impaired when more than 32 MB are used.\n");
 			else
 				printk("probably OK (after B9730xxxx).\n");
 			printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
 		}
 
 		/* K6 with old style WHCR */
 		if (c->x86_model < 8 ||
-		   (c->x86_model== 8 && c->x86_mask < 8)) {
+		   (c->x86_model == 8 && c->x86_mask < 8)) {
 			/* We can only write allocate on the low 508Mb */
-			if(mbytes>508)
-				mbytes=508;
+			if (mbytes > 508)
+				mbytes = 508;
 
 			rdmsr(MSR_K6_WHCR, l, h);
-			if ((l&0x0000FFFF)==0) {
+			if ((l&0x0000FFFF) == 0) {
 				unsigned long flags;
-				l=(1<<0)|((mbytes/4)<<1);
+				l = (1<<0)|((mbytes/4)<<1);
 				local_irq_save(flags);
 				wbinvd();
 				wrmsr(MSR_K6_WHCR, l, h);
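Aside (illustration, not part of the patch): the WHCR value written above packs the write-allocatable memory size into a small bitfield, which is why mbytes is capped at 508. A stand-alone user-space sketch of the same arithmetic, using a made-up 512 MB machine:

#include <stdio.h>

int main(void)
{
	unsigned int mbytes = 512;	/* hypothetical memory size */
	unsigned int l;

	/* Old-style K6 WHCR: bit 0 enables write allocation, bits 7..1
	 * hold the size in 4 MB units, so 508 MB (127 * 4 MB) is the cap. */
	if (mbytes > 508)
		mbytes = 508;

	l = (1 << 0) | ((mbytes / 4) << 1);
	printf("WHCR low word: 0x%X\n", l);	/* prints 0xFF for 508 MB */
	return 0;
}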
@@ -185,17 +186,17 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 			break;
 		}
 
-		if ((c->x86_model == 8 && c->x86_mask >7) ||
+		if ((c->x86_model == 8 && c->x86_mask > 7) ||
 		     c->x86_model == 9 || c->x86_model == 13) {
 			/* The more serious chips .. */
 
-			if(mbytes>4092)
-				mbytes=4092;
+			if (mbytes > 4092)
+				mbytes = 4092;
 
 			rdmsr(MSR_K6_WHCR, l, h);
-			if ((l&0xFFFF0000)==0) {
+			if ((l&0xFFFF0000) == 0) {
 				unsigned long flags;
-				l=((mbytes>>2)<<22)|(1<<16);
+				l = ((mbytes>>2)<<22)|(1<<16);
 				local_irq_save(flags);
 				wbinvd();
 				wrmsr(MSR_K6_WHCR, l, h);
@@ -207,7 +208,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		/* Set MTRR capability flag if appropriate */
 		if (c->x86_model == 13 || c->x86_model == 9 ||
 		   (c->x86_model == 8 && c->x86_mask >= 8))
-			set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 		break;
 	}
 
@@ -217,10 +218,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 			break;
 		}
 		break;
 	case 6: /* An Athlon/Duron */
 
-		/* Bit 15 of Athlon specific MSR 15, needs to be 0
+		/*
+		 * Bit 15 of Athlon specific MSR 15, needs to be 0
 		 * to enable SSE on Palomino/Morgan/Barton CPU's.
 		 * If the BIOS didn't enable it already, enable it here.
 		 */
 		if (c->x86_model >= 6 && c->x86_model <= 10) {
@@ -229,15 +231,16 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 			rdmsr(MSR_K7_HWCR, l, h);
 			l &= ~0x00008000;
 			wrmsr(MSR_K7_HWCR, l, h);
-			set_bit(X86_FEATURE_XMM, c->x86_capability);
+			set_cpu_cap(c, X86_FEATURE_XMM);
 		}
 	}
 
-	/* It's been determined by AMD that Athlons since model 8 stepping 1
+	/*
+	 * It's been determined by AMD that Athlons since model 8 stepping 1
 	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
 	 * As per AMD technical note 27212 0.2
 	 */
-	if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
+	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
 		rdmsr(MSR_K7_CLK_CTL, l, h);
 		if ((l & 0xfff00000) != 0x20000000) {
 			printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
@@ -253,20 +256,19 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	/* Use K8 tuning for Fam10h and Fam11h */
 	case 0x10:
 	case 0x11:
-		set_bit(X86_FEATURE_K8, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_K8);
 		break;
 	case 6:
-		set_bit(X86_FEATURE_K7, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_K7);
 		break;
 	}
 	if (c->x86 >= 6)
-		set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
 	display_cacheinfo(c);
 
-	if (cpuid_eax(0x80000000) >= 0x80000008) {
+	if (cpuid_eax(0x80000000) >= 0x80000008)
 		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-	}
 
 #ifdef CONFIG_X86_HT
 	/*
@@ -302,20 +304,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
 	/* K6s reports MCEs but don't actually have all the MSRs */
 	if (c->x86 < 6)
-		clear_bit(X86_FEATURE_MCE, c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_MCE);
 
 	if (cpu_has_xmm2)
-		set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
+		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 }
 
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
 		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
 			size = 64;
 		if (c->x86_model == 4 &&
-			(c->x86_mask==0 || c->x86_mask==1))	/* Tbird rev A1/A2 */
+			(c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
 			size = 256;
 	}
 	return size;
@@ -323,19 +325,20 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned in
 
 static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
 	.c_models = {
 		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
 		  {
 			[3] = "486 DX/2",
 			[7] = "486 DX/2-WB",
 			[8] = "486 DX/4",
 			[9] = "486 DX/4-WB",
 			[14] = "Am5x86-WT",
 			[15] = "Am5x86-WB"
 		  }
 		},
 	},
+	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
 	.c_size_cache	= amd_size_cache,
 };
@@ -345,3 +348,5 @@ int __init amd_init_cpu(void)
 	cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
 	return 0;
 }
+
+cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
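Aside: most of the churn in this file is the conversion from open-coded bitmap operations on c->x86_capability to the *_cpu_cap() helpers. The real definitions live in <asm/cpufeature.h> and are not part of this diff; as a rough sketch of what the conversion amounts to (the exact definitions are an assumption):

/* approximate shape of the helpers used above -- assumption, not from this diff */
#define set_cpu_cap(c, bit)	set_bit(bit, (c)->x86_capability)
#define clear_cpu_cap(c, bit)	clear_bit(bit, (c)->x86_capability)
#define test_cpu_cap(c, bit)	test_bit(bit, (c)->x86_capability)

The win is uniformity: every capability update now goes through a single, greppable interface instead of hand-written set_bit()/clear_bit() calls.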
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 9681fa15ddf0..e0f45edd6a55 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -1,31 +1,34 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
+
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/e820.h>
 #include <asm/mtrr.h>
+
 #include "cpu.h"
 
 #ifdef CONFIG_X86_OOSTORE
 
 static u32 __cpuinit power2(u32 x)
 {
-	u32 s=1;
-	while(s<=x)
-		s<<=1;
-	return s>>=1;
+	u32 s = 1;
+
+	while (s <= x)
+		s <<= 1;
+
+	return s >>= 1;
 }
 
 
 /*
  * Set up an actual MCR
  */
-
 static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
 {
 	u32 lo, hi;
 
 	hi = base & ~0xFFF;
 	lo = ~(size-1);	/* Size is a power of 2 so this makes a mask */
 	lo &= ~0xFFF;	/* Remove the ctrl value bits */
@@ -35,30 +38,28 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
 }
 
 /*
 * Figure what we can cover with MCR's
 *
 * Shortcut: We know you can't put 4Gig of RAM on a winchip
 */
-
-static u32 __cpuinit ramtop(void)		/* 16388 */
+static u32 __cpuinit ramtop(void)
 {
-	int i;
-	u32 top = 0;
 	u32 clip = 0xFFFFFFFFUL;
-
+	u32 top = 0;
+	int i;
+
 	for (i = 0; i < e820.nr_map; i++) {
 		unsigned long start, end;
 
 		if (e820.map[i].addr > 0xFFFFFFFFUL)
 			continue;
 		/*
		 * Don't MCR over reserved space. Ignore the ISA hole
		 * we frob around that catastrophe already
 		 */
-
-		if (e820.map[i].type == E820_RESERVED)
-		{
-			if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
+		if (e820.map[i].type == E820_RESERVED) {
+			if (e820.map[i].addr >= 0x100000UL &&
+			    e820.map[i].addr < clip)
 				clip = e820.map[i].addr;
 			continue;
 		}
@@ -69,28 +70,27 @@ static u32 __cpuinit ramtop(void) /* 16388 */
 		if (end > top)
 			top = end;
 	}
-	/* Everything below 'top' should be RAM except for the ISA hole.
-	   Because of the limited MCR's we want to map NV/ACPI into our
-	   MCR range for gunk in RAM
-
-	   Clip might cause us to MCR insufficient RAM but that is an
-	   acceptable failure mode and should only bite obscure boxes with
-	   a VESA hole at 15Mb
-
-	   The second case Clip sometimes kicks in is when the EBDA is marked
-	   as reserved. Again we fail safe with reasonable results
-	*/
-
-	if(top>clip)
-		top=clip;
+	/*
+	 * Everything below 'top' should be RAM except for the ISA hole.
+	 * Because of the limited MCR's we want to map NV/ACPI into our
+	 * MCR range for gunk in RAM
+	 *
+	 * Clip might cause us to MCR insufficient RAM but that is an
+	 * acceptable failure mode and should only bite obscure boxes with
+	 * a VESA hole at 15Mb
+	 *
+	 * The second case Clip sometimes kicks in is when the EBDA is marked
+	 * as reserved. Again we fail safe with reasonable results
+	 */
+	if (top > clip)
+		top = clip;
 
 	return top;
 }
 
 /*
  * Compute a set of MCR's to give maximum coverage
  */
-
 static int __cpuinit centaur_mcr_compute(int nr, int key)
 {
 	u32 mem = ramtop();
@@ -99,141 +99,131 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
 	u32 top = root;
 	u32 floor = 0;
 	int ct = 0;
 
-	while(ct<nr)
-	{
+	while (ct < nr) {
 		u32 fspace = 0;
+		u32 high;
+		u32 low;
 
 		/*
 		 * Find the largest block we will fill going upwards
 		 */
-
-		u32 high = power2(mem-top);
+		high = power2(mem-top);
 
 		/*
 		 * Find the largest block we will fill going downwards
 		 */
-
-		u32 low = base/2;
+		low = base/2;
 
 		/*
 		 * Don't fill below 1Mb going downwards as there
 		 * is an ISA hole in the way.
 		 */
-
-		if(base <= 1024*1024)
+		if (base <= 1024*1024)
 			low = 0;
 
 		/*
 		 * See how much space we could cover by filling below
 		 * the ISA hole
 		 */
 
-		if(floor == 0)
+		if (floor == 0)
 			fspace = 512*1024;
-		else if(floor ==512*1024)
+		else if (floor == 512*1024)
 			fspace = 128*1024;
 
 		/* And forget ROM space */
 
 		/*
 		 * Now install the largest coverage we get
 		 */
-
-		if(fspace > high && fspace > low)
-		{
+		if (fspace > high && fspace > low) {
 			centaur_mcr_insert(ct, floor, fspace, key);
 			floor += fspace;
-		}
-		else if(high > low)
-		{
+		} else if (high > low) {
 			centaur_mcr_insert(ct, top, high, key);
 			top += high;
-		}
-		else if(low > 0)
-		{
+		} else if (low > 0) {
 			base -= low;
 			centaur_mcr_insert(ct, base, low, key);
-		}
-		else break;
+		} else
+			break;
 		ct++;
 	}
 	/*
 	 * We loaded ct values. We now need to set the mask. The caller
 	 * must do this bit.
 	 */
-
 	return ct;
 }
 
 static void __cpuinit centaur_create_optimal_mcr(void)
 {
+	int used;
 	int i;
+
 	/*
 	 * Allocate up to 6 mcrs to mark as much of ram as possible
 	 * as write combining and weak write ordered.
 	 *
 	 * To experiment with: Linux never uses stack operations for
 	 * mmio spaces so we could globally enable stack operation wc
 	 *
 	 * Load the registers with type 31 - full write combining, all
 	 * writes weakly ordered.
 	 */
-	int used = centaur_mcr_compute(6, 31);
+	used = centaur_mcr_compute(6, 31);
 
 	/*
 	 * Wipe unused MCRs
 	 */
-
-	for(i=used;i<8;i++)
+	for (i = used; i < 8; i++)
 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }
 
 static void __cpuinit winchip2_create_optimal_mcr(void)
 {
 	u32 lo, hi;
+	int used;
 	int i;
 
 	/*
 	 * Allocate up to 6 mcrs to mark as much of ram as possible
 	 * as write combining, weak store ordered.
 	 *
 	 * Load the registers with type 25
 	 *	8  -  weak write ordering
 	 *	16 -  weak read ordering
 	 *	1  -  write combining
 	 */
+	used = centaur_mcr_compute(6, 25);
 
-	int used = centaur_mcr_compute(6, 25);
-
 	/*
 	 * Mark the registers we are using.
 	 */
-
 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	for(i=0;i<used;i++)
-		lo|=1<<(9+i);
+	for (i = 0; i < used; i++)
+		lo |= 1<<(9+i);
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 
 	/*
 	 * Wipe unused MCRs
 	 */
 
-	for(i=used;i<8;i++)
+	for (i = used; i < 8; i++)
 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }
 
 /*
  * Handle the MCR key on the Winchip 2.
  */
-
 static void __cpuinit winchip2_unprotect_mcr(void)
 {
 	u32 lo, hi;
 	u32 key;
 
 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo&=~0x1C0;	/* blank bits 8-6 */
+	lo &= ~0x1C0;	/* blank bits 8-6 */
 	key = (lo>>17) & 7;
 	lo |= key<<6;	/* replace with unlock key */
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
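Aside: the greedy loop in centaur_mcr_compute() relies on power2() returning the largest power of two that does not exceed its argument, since each MCR covers a naturally aligned power-of-two block. A stand-alone user-space check of that behaviour (illustration only, not kernel code):

#include <assert.h>

static unsigned int power2(unsigned int x)
{
	unsigned int s = 1;

	while (s <= x)
		s <<= 1;

	return s >> 1;
}

int main(void)
{
	assert(power2(100) == 64);	/* largest power of two <= 100 */
	assert(power2(64) == 64);	/* exact powers map to themselves */
	assert(power2(1) == 1);
	return 0;
}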
@@ -242,9 +232,9 @@ static void __cpuinit winchip2_unprotect_mcr(void)
 static void __cpuinit winchip2_protect_mcr(void)
 {
 	u32 lo, hi;
 
 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo&=~0x1C0;	/* blank bits 8-6 */
+	lo &= ~0x1C0;	/* blank bits 8-6 */
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 }
 #endif /* CONFIG_X86_OOSTORE */
@@ -267,17 +257,17 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
 
 	/* enable ACE unit, if present and disabled */
 	if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
-		rdmsr (MSR_VIA_FCR, lo, hi);
+		rdmsr(MSR_VIA_FCR, lo, hi);
 		lo |= ACE_FCR;		/* enable ACE unit */
-		wrmsr (MSR_VIA_FCR, lo, hi);
+		wrmsr(MSR_VIA_FCR, lo, hi);
 		printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
 	}
 
 	/* enable RNG unit, if present and disabled */
 	if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
-		rdmsr (MSR_VIA_RNG, lo, hi);
+		rdmsr(MSR_VIA_RNG, lo, hi);
 		lo |= RNG_ENABLE;	/* enable RNG unit */
-		wrmsr (MSR_VIA_RNG, lo, hi);
+		wrmsr(MSR_VIA_RNG, lo, hi);
 		printk(KERN_INFO "CPU: Enabled h/w RNG\n");
 	}
 
@@ -288,171 +278,183 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
 	}
 
 	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
-	if (c->x86_model >=6 && c->x86_model <= 9) {
-		rdmsr (MSR_VIA_FCR, lo, hi);
+	if (c->x86_model >= 6 && c->x86_model <= 9) {
+		rdmsr(MSR_VIA_FCR, lo, hi);
 		lo |= (1<<1 | 1<<7);
-		wrmsr (MSR_VIA_FCR, lo, hi);
-		set_bit(X86_FEATURE_CX8, c->x86_capability);
+		wrmsr(MSR_VIA_FCR, lo, hi);
+		set_cpu_cap(c, X86_FEATURE_CX8);
 	}
 
 	/* Before Nehemiah, the C3's had 3dNOW! */
-	if (c->x86_model >=6 && c->x86_model <9)
-		set_bit(X86_FEATURE_3DNOW, c->x86_capability);
+	if (c->x86_model >= 6 && c->x86_model < 9)
+		set_cpu_cap(c, X86_FEATURE_3DNOW);
 
 	get_model_name(c);
 	display_cacheinfo(c);
 }
 
+enum {
+	ECX8		= 1<<1,
+	EIERRINT	= 1<<2,
+	DPM		= 1<<3,
+	DMCE		= 1<<4,
+	DSTPCLK		= 1<<5,
+	ELINEAR		= 1<<6,
+	DSMC		= 1<<7,
+	DTLOCK		= 1<<8,
+	EDCTLB		= 1<<8,
+	EMMX		= 1<<9,
+	DPDC		= 1<<11,
+	EBRPRED		= 1<<12,
+	DIC		= 1<<13,
+	DDC		= 1<<14,
+	DNA		= 1<<15,
+	ERETSTK		= 1<<16,
+	E2MMX		= 1<<19,
+	EAMD3D		= 1<<20,
+};
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
-	enum {
-		ECX8=1<<1,
-		EIERRINT=1<<2,
-		DPM=1<<3,
-		DMCE=1<<4,
-		DSTPCLK=1<<5,
-		ELINEAR=1<<6,
-		DSMC=1<<7,
-		DTLOCK=1<<8,
-		EDCTLB=1<<8,
-		EMMX=1<<9,
-		DPDC=1<<11,
-		EBRPRED=1<<12,
-		DIC=1<<13,
-		DDC=1<<14,
-		DNA=1<<15,
-		ERETSTK=1<<16,
-		E2MMX=1<<19,
-		EAMD3D=1<<20,
-	};
 
 	char *name;
-	u32  fcr_set=0;
-	u32  fcr_clr=0;
-	u32  lo,hi,newlo;
-	u32  aa,bb,cc,dd;
+	u32  fcr_set = 0;
+	u32  fcr_clr = 0;
+	u32  lo, hi, newlo;
+	u32  aa, bb, cc, dd;
 
-	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-	clear_bit(0*32+31, c->x86_capability);
+	/*
+	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 */
+	clear_cpu_cap(c, 0*32+31);
 
 	switch (c->x86) {
-
-		case 5:
-			switch(c->x86_model) {
-			case 4:
-				name="C6";
-				fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
-				fcr_clr=DPDC;
-				printk(KERN_NOTICE "Disabling bugged TSC.\n");
-				clear_bit(X86_FEATURE_TSC, c->x86_capability);
+	case 5:
+		switch (c->x86_model) {
+		case 4:
+			name = "C6";
+			fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
+			fcr_clr = DPDC;
+			printk(KERN_NOTICE "Disabling bugged TSC.\n");
+			clear_cpu_cap(c, X86_FEATURE_TSC);
 #ifdef CONFIG_X86_OOSTORE
 			centaur_create_optimal_mcr();
-			/* Enable
-				write combining on non-stack, non-string
-				write combining on string, all types
-				weak write ordering
-
-			   The C6 original lacks weak read order
-
-			   Note 0x120 is write only on Winchip 1 */
-
-			wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif
+			/*
+			 * Enable:
+			 *	write combining on non-stack, non-string
+			 *	write combining on string, all types
+			 *	weak write ordering
+			 *
+			 * The C6 original lacks weak read order
+			 *
+			 * Note 0x120 is write only on Winchip 1
+			 */
+			wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
+#endif
+			break;
+		case 8:
+			switch (c->x86_mask) {
+			default:
+				name = "2";
+				break;
+			case 7 ... 9:
+				name = "2A";
 				break;
-			case 8:
-				switch(c->x86_mask) {
-				default:
-					name="2";
-					break;
-				case 7 ... 9:
-					name="2A";
-					break;
-				case 10 ... 15:
-					name="2B";
-					break;
-				}
-				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
-				fcr_clr=DPDC;
+			case 10 ... 15:
+				name = "2B";
+				break;
+			}
+			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
+				  E2MMX|EAMD3D;
+			fcr_clr = DPDC;
 #ifdef CONFIG_X86_OOSTORE
 			winchip2_unprotect_mcr();
 			winchip2_create_optimal_mcr();
 			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-			/* Enable
-				write combining on non-stack, non-string
-				write combining on string, all types
-				weak write ordering
-			*/
-			lo|=31;
-			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-			winchip2_protect_mcr();
+			/*
+			 * Enable:
+			 *	write combining on non-stack, non-string
+			 *	write combining on string, all types
+			 *	weak write ordering
+			 */
+			lo |= 31;
+			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+			winchip2_protect_mcr();
 #endif
 			break;
 		case 9:
-			name="3";
-			fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
-			fcr_clr=DPDC;
+			name = "3";
+			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
+				  E2MMX|EAMD3D;
+			fcr_clr = DPDC;
 #ifdef CONFIG_X86_OOSTORE
 			winchip2_unprotect_mcr();
 			winchip2_create_optimal_mcr();
 			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-			/* Enable
-				write combining on non-stack, non-string
-				write combining on string, all types
-				weak write ordering
-			*/
-			lo|=31;
-			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-			winchip2_protect_mcr();
+			/*
+			 * Enable:
+			 *	write combining on non-stack, non-string
+			 *	write combining on string, all types
+			 *	weak write ordering
+			 */
+			lo |= 31;
+			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+			winchip2_protect_mcr();
 #endif
 			break;
 		default:
-			name="??";
+			name = "??";
 		}
 
 		rdmsr(MSR_IDT_FCR1, lo, hi);
-		newlo=(lo|fcr_set) & (~fcr_clr);
+		newlo = (lo|fcr_set) & (~fcr_clr);
 
-		if (newlo!=lo) {
-			printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
-			wrmsr(MSR_IDT_FCR1, newlo, hi );
-		} else {
-			printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
-		}
-		/* Emulate MTRRs using Centaur's MCR. */
-		set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
-		/* Report CX8 */
-		set_bit(X86_FEATURE_CX8, c->x86_capability);
-		/* Set 3DNow! on Winchip 2 and above. */
-		if (c->x86_model >=8)
-			set_bit(X86_FEATURE_3DNOW, c->x86_capability);
-		/* See if we can find out some more. */
-		if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
-			/* Yes, we can. */
-			cpuid(0x80000005,&aa,&bb,&cc,&dd);
-			/* Add L1 data and code cache sizes. */
-			c->x86_cache_size = (cc>>24)+(dd>>24);
-		}
-		sprintf( c->x86_model_id, "WinChip %s", name );
-		break;
+		if (newlo != lo) {
+			printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n",
+				lo, newlo);
+			wrmsr(MSR_IDT_FCR1, newlo, hi);
+		} else {
+			printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
+		}
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		/* Report CX8 */
+		set_cpu_cap(c, X86_FEATURE_CX8);
+		/* Set 3DNow! on Winchip 2 and above. */
+		if (c->x86_model >= 8)
+			set_cpu_cap(c, X86_FEATURE_3DNOW);
+		/* See if we can find out some more. */
+		if (cpuid_eax(0x80000000) >= 0x80000005) {
+			/* Yes, we can. */
+			cpuid(0x80000005, &aa, &bb, &cc, &dd);
+			/* Add L1 data and code cache sizes. */
+			c->x86_cache_size = (cc>>24)+(dd>>24);
+		}
+		sprintf(c->x86_model_id, "WinChip %s", name);
+		break;
 
 	case 6:
 		init_c3(c);
 		break;
 	}
 }
 
-static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+static unsigned int __cpuinit
+centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* VIA C3 CPUs (670-68F) need further shifting. */
 	if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
 		size >>= 8;
 
-	/* VIA also screwed up Nehemiah stepping 1, and made
-	   it return '65KB' instead of '64KB'
-	   - Note, it seems this may only be in engineering samples. */
-	if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
-		size -=1;
+	/*
+	 * There's also an erratum in Nehemiah stepping 1, which
+	 * returns '65KB' instead of '64KB'
+	 * - Note, it seems this may only be in engineering samples.
+	 */
+	if ((c->x86 == 6) && (c->x86_model == 9) &&
+				(c->x86_mask == 1) && (size == 65))
+		size -= 1;
 
 	return size;
 }
@@ -464,8 +466,4 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_size_cache	= centaur_size_cache,
 };
 
-int __init centaur_init_cpu(void)
-{
-	cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
-	return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
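Aside: both amd.c (above) and centaur.c now end with cpu_vendor_dev_register() instead of exporting an explicit <vendor>_init_cpu() function. The macro itself lives in cpu.h, which is outside this diff; a plausible sketch, assuming a linker-section based registry that early-boot code walks to populate cpu_devs[]:

/* assumed shape of the registration machinery -- not taken from this diff */
struct cpu_vendor_dev {
	int vendor;
	struct cpu_dev *cpu_dev;
};

#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
	static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
	__attribute__((__section__(".x86cpuvendor.init"))) = \
	{ cpu_vendor_id, cpu_dev }

With every vendor registered this way, early_cpu_detect() in common.c (below) can dispatch through cpu_devs[c->x86_vendor]->c_early_init instead of hard-coding a per-vendor switch.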
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a38aafaefc23..d999d7833bc2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -62,9 +62,9 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
-struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit default_init(struct cpuinfo_x86 * c)
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
 	/* Not much we can do here... */
 	/* Check if at least it has cpuid */
@@ -81,11 +81,11 @@ static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
 };
-static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
+static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 static int __init cachesize_setup(char *str)
 {
-	get_option (&str, &cachesize_override);
+	get_option(&str, &cachesize_override);
 	return 1;
 }
 __setup("cachesize=", cachesize_setup);
@@ -107,12 +107,12 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	/* Intel chips right-justify this string for some dumb reason;
 	   undo that brain damage */
 	p = q = &c->x86_model_id[0];
-	while ( *p == ' ' )
+	while (*p == ' ')
 		p++;
-	if ( p != q ) {
-		while ( *p )
+	if (p != q) {
+		while (*p)
 			*q++ = *p++;
-		while ( q <= &c->x86_model_id[48] )
+		while (q <= &c->x86_model_id[48])
 			*q++ = '\0';	/* Zero-pad the rest */
 	}
 
@@ -130,7 +130,7 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size=(ecx>>24)+(edx>>24);
+		c->x86_cache_size = (ecx>>24)+(edx>>24);
 	}
 
 	if (n < 0x80000006)	/* Some chips just has a large L1. */
@@ -138,16 +138,16 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 
 	ecx = cpuid_ecx(0x80000006);
 	l2size = ecx >> 16;
 
 	/* do processor-specific cache resizing */
 	if (this_cpu->c_size_cache)
-		l2size = this_cpu->c_size_cache(c,l2size);
+		l2size = this_cpu->c_size_cache(c, l2size);
 
 	/* Allow user to override all this if necessary. */
 	if (cachesize_override != -1)
 		l2size = cachesize_override;
 
-	if ( l2size == 0 )
+	if (l2size == 0)
 		return;		/* Again, no L2 cache is possible */
 
 	c->x86_cache_size = l2size;
@@ -156,16 +156,19 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		l2size, ecx & 0xFF);
 }
 
-/* Naming convention should be: <Name> [(<Codename>)] */
-/* This table only is used unless init_<vendor>() below doesn't set it; */
-/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table only is used unless init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
+ *
+ */
 
 /* Look up CPU names by table lookup. */
 static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	struct cpu_model_info *info;
 
-	if ( c->x86_model >= 16 )
+	if (c->x86_model >= 16)
 		return NULL;	/* Range check */
 
 	if (!this_cpu)
@@ -190,9 +193,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (cpu_devs[i]) {
-			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
+			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 			    (cpu_devs[i]->c_ident[1] &&
-			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 				c->x86_vendor = i;
 				if (!early)
 					this_cpu = cpu_devs[i];
@@ -210,7 +213,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 }
 
 
-static int __init x86_fxsr_setup(char * s)
+static int __init x86_fxsr_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_FXSR);
 	setup_clear_cpu_cap(X86_FEATURE_XMM);
@@ -219,7 +222,7 @@ static int __init x86_fxsr_setup(char * s)
 __setup("nofxsr", x86_fxsr_setup);
 
 
-static int __init x86_sep_setup(char * s)
+static int __init x86_sep_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_SEP);
 	return 1;
@@ -306,14 +309,30 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 
 	}
 
-}
+	clear_cpu_cap(c, X86_FEATURE_PAT);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	}
 
-/* Do minimum CPU detection early.
-   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-   The others are not touched to avoid unwanted side effects.
-
-   WARNING: this function is only called on the BP.  Don't add code here
-   that is supposed to run on all CPUs. */
+}
+
+/*
+ * Do minimum CPU detection early.
+ * Fields really needed: vendor, cpuid_level, family, model, mask,
+ * cache alignment.
+ * The others are not touched to avoid unwanted side effects.
+ *
+ * WARNING: this function is only called on the BP.  Don't add code here
+ * that is supposed to run on all CPUs.
+ */
 static void __init early_cpu_detect(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
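Aside: the PAT whitelist added above is duplicated verbatim in generic_identify() later in this diff. Condensed into a single predicate for readability (illustration only; the patch itself open-codes the switch in both places):

static int cpu_pat_trusted(int vendor, int family, int model)
{
	switch (vendor) {
	case X86_VENDOR_AMD:
		return family >= 0xf && family <= 0x11;
	case X86_VENDOR_INTEL:
		return family == 0xF || (family == 6 && model >= 15);
	}
	return 0;
}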
@@ -328,19 +347,14 @@ static void __init early_cpu_detect(void)
 
 	get_cpu_vendor(c, 1);
 
-	switch (c->x86_vendor) {
-	case X86_VENDOR_AMD:
-		early_init_amd(c);
-		break;
-	case X86_VENDOR_INTEL:
-		early_init_intel(c);
-		break;
-	}
+	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
+	    cpu_devs[c->x86_vendor]->c_early_init)
+		cpu_devs[c->x86_vendor]->c_early_init(c);
 
 	early_get_cap(c);
 }
 
-static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
 	unsigned int ebx;
@@ -351,13 +365,12 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 		      (unsigned int *)&c->x86_vendor_id[0],
 		      (unsigned int *)&c->x86_vendor_id[8],
 		      (unsigned int *)&c->x86_vendor_id[4]);
 
 		get_cpu_vendor(c, 0);
 		/* Initialize the standard set of capabilities */
 		/* Note that the vendor-specific code below might override */
-
 		/* Intel-defined flags: level 0x00000001 */
-		if ( c->cpuid_level >= 0x00000001 ) {
+		if (c->cpuid_level >= 0x00000001) {
 			u32 capability, excap;
 			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 			c->x86_capability[0] = capability;
@@ -369,12 +382,14 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 			if (c->x86 >= 0x6)
 				c->x86_model += ((tfms >> 16) & 0xF) << 4;
 			c->x86_mask = tfms & 15;
+			c->initial_apicid = (ebx >> 24) & 0xFF;
 #ifdef CONFIG_X86_HT
-			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+			c->apicid = phys_pkg_id(c->initial_apicid, 0);
+			c->phys_proc_id = c->initial_apicid;
 #else
-			c->apicid = (ebx >> 24) & 0xFF;
+			c->apicid = c->initial_apicid;
 #endif
-			if (c->x86_capability[0] & (1<<19))
+			if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
 				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
 		} else {
 			/* Have CPUID level 0 only - unheard of */
@@ -383,33 +398,42 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 
 		/* AMD-defined flags: level 0x80000001 */
 		xlvl = cpuid_eax(0x80000000);
-		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-			if ( xlvl >= 0x80000001 ) {
+		if ((xlvl & 0xffff0000) == 0x80000000) {
+			if (xlvl >= 0x80000001) {
 				c->x86_capability[1] = cpuid_edx(0x80000001);
 				c->x86_capability[6] = cpuid_ecx(0x80000001);
 			}
-			if ( xlvl >= 0x80000004 )
+			if (xlvl >= 0x80000004)
 				get_model_name(c); /* Default name */
 		}
 
 		init_scattered_cpuid_features(c);
 	}
 
-#ifdef CONFIG_X86_HT
-	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-#endif
+	clear_cpu_cap(c, X86_FEATURE_PAT);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_AMD:
+		if (c->x86 >= 0xf && c->x86 <= 0x11)
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	case X86_VENDOR_INTEL:
+		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+			set_cpu_cap(c, X86_FEATURE_PAT);
+		break;
+	}
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
+	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
 		/* Disable processor serial number */
-		unsigned long lo,hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+		unsigned long lo, hi;
+		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_bit(X86_FEATURE_PN, c->x86_capability);
+		clear_cpu_cap(c, X86_FEATURE_PN);
 
 		/* Disabling the serial number may affect the cpuid level */
 		c->cpuid_level = cpuid_eax(0);
@@ -444,9 +468,11 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
 	if (!have_cpuid_p()) {
-		/* First of all, decide if this is a 486 or higher */
-		/* It's a 486 if we can modify the AC flag */
-		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+		/*
+		 * First of all, decide if this is a 486 or higher
+		 * It's a 486 if we can modify the AC flag
+		 */
+		if (flag_is_changeable_p(X86_EFLAGS_AC))
 			c->x86 = 4;
 		else
 			c->x86 = 3;
@@ -479,10 +505,10 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 */
 
 	/* If the model name is still unset, do table lookup. */
-	if ( !c->x86_model_id[0] ) {
+	if (!c->x86_model_id[0]) {
 		char *p;
 		p = table_lookup_model(c);
-		if ( p )
+		if (p)
 			strcpy(c->x86_model_id, p);
 		else
 			/* Last resort... */
@@ -496,9 +522,9 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
496 | * common between the CPUs. The first time this routine gets | 522 | * common between the CPUs. The first time this routine gets |
497 | * executed, c == &boot_cpu_data. | 523 | * executed, c == &boot_cpu_data. |
498 | */ | 524 | */ |
499 | if ( c != &boot_cpu_data ) { | 525 | if (c != &boot_cpu_data) { |
500 | /* AND the already accumulated flags with these */ | 526 | /* AND the already accumulated flags with these */ |
501 | for ( i = 0 ; i < NCAPINTS ; i++ ) | 527 | for (i = 0 ; i < NCAPINTS ; i++) |
502 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | 528 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
503 | } | 529 | } |
504 | 530 | ||
@@ -542,7 +568,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
542 | 568 | ||
543 | if (smp_num_siblings == 1) { | 569 | if (smp_num_siblings == 1) { |
544 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); | 570 | printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); |
545 | } else if (smp_num_siblings > 1 ) { | 571 | } else if (smp_num_siblings > 1) { |
546 | 572 | ||
547 | if (smp_num_siblings > NR_CPUS) { | 573 | if (smp_num_siblings > NR_CPUS) { |
548 | printk(KERN_WARNING "CPU: Unsupported number of the " | 574 | printk(KERN_WARNING "CPU: Unsupported number of the " |
@@ -552,7 +578,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
552 | } | 578 | } |
553 | 579 | ||
554 | index_msb = get_count_order(smp_num_siblings); | 580 | index_msb = get_count_order(smp_num_siblings); |
555 | c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); | 581 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); |
556 | 582 | ||
557 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | 583 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", |
558 | c->phys_proc_id); | 584 | c->phys_proc_id); |
@@ -563,7 +589,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
563 | 589 | ||
564 | core_bits = get_count_order(c->x86_max_cores); | 590 | core_bits = get_count_order(c->x86_max_cores); |
565 | 591 | ||
566 | c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & | 592 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & |
567 | ((1 << core_bits) - 1); | 593 | ((1 << core_bits) - 1); |
568 | 594 | ||
569 | if (c->x86_max_cores > 1) | 595 | if (c->x86_max_cores > 1) |
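
Both hunks in detect_ht() now slice c->initial_apicid instead of re-reading ebx: the bits above get_count_order(smp_num_siblings) select the physical package, and, once smp_num_siblings has been divided down to threads-per-core, the low core_bits of the remaining field select the core. get_count_order() is ceil(log2()); assuming a power-of-two topology and that phys_pkg_id() reduces to a plain right shift (as on the default subarch), the arithmetic looks like:

#include <stdio.h>

/* ceil(log2(n)), like the kernel's get_count_order() */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int apicid = 0xb;		/* example initial APIC id */
	unsigned int siblings = 8;		/* 2 threads x 4 cores per package */
	unsigned int max_cores = 4;

	int index_msb = count_order(siblings);	/* 3 */
	int core_bits = count_order(max_cores);	/* 2 */

	unsigned int pkg = apicid >> index_msb;
	unsigned int core = (apicid >> (index_msb - core_bits)) &
			    ((1u << core_bits) - 1);

	printf("apicid %#x -> package %u, core %u\n", apicid, pkg, core);
	return 0;
}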
@@ -597,7 +623,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | |||
597 | else | 623 | else |
598 | printk("%s", c->x86_model_id); | 624 | printk("%s", c->x86_model_id); |
599 | 625 | ||
600 | if (c->x86_mask || c->cpuid_level >= 0) | 626 | if (c->x86_mask || c->cpuid_level >= 0) |
601 | printk(" stepping %02x\n", c->x86_mask); | 627 | printk(" stepping %02x\n", c->x86_mask); |
602 | else | 628 | else |
603 | printk("\n"); | 629 | printk("\n"); |
@@ -616,23 +642,15 @@ __setup("clearcpuid=", setup_disablecpuid); | |||
616 | 642 | ||
617 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; | 643 | cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; |
618 | 644 | ||
619 | /* This is hacky. :) | ||
620 | * We're emulating future behavior. | ||
621 | * In the future, the cpu-specific init functions will be called implicitly | ||
622 | * via the magic of initcalls. | ||
623 | * They will insert themselves into the cpu_devs structure. | ||
624 | * Then, when cpu_init() is called, we can just iterate over that array. | ||
625 | */ | ||
626 | void __init early_cpu_init(void) | 645 | void __init early_cpu_init(void) |
627 | { | 646 | { |
628 | intel_cpu_init(); | 647 | struct cpu_vendor_dev *cvdev; |
629 | cyrix_init_cpu(); | 648 | |
630 | nsc_init_cpu(); | 649 | for (cvdev = __x86cpuvendor_start ; |
631 | amd_init_cpu(); | 650 | cvdev < __x86cpuvendor_end ; |
632 | centaur_init_cpu(); | 651 | cvdev++) |
633 | transmeta_init_cpu(); | 652 | cpu_devs[cvdev->vendor] = cvdev->cpu_dev; |
634 | nexgen_init_cpu(); | 653 | |
635 | umc_init_cpu(); | ||
636 | early_cpu_detect(); | 654 | early_cpu_detect(); |
637 | } | 655 | } |
638 | 656 | ||
@@ -666,7 +684,7 @@ void __cpuinit cpu_init(void) | |||
666 | { | 684 | { |
667 | int cpu = smp_processor_id(); | 685 | int cpu = smp_processor_id(); |
668 | struct task_struct *curr = current; | 686 | struct task_struct *curr = current; |
669 | struct tss_struct * t = &per_cpu(init_tss, cpu); | 687 | struct tss_struct *t = &per_cpu(init_tss, cpu); |
670 | struct thread_struct *thread = &curr->thread; | 688 | struct thread_struct *thread = &curr->thread; |
671 | 689 | ||
672 | if (cpu_test_and_set(cpu, cpu_initialized)) { | 690 | if (cpu_test_and_set(cpu, cpu_initialized)) { |
@@ -692,7 +710,7 @@ void __cpuinit cpu_init(void) | |||
692 | enter_lazy_tlb(&init_mm, curr); | 710 | enter_lazy_tlb(&init_mm, curr); |
693 | 711 | ||
694 | load_sp0(t, thread); | 712 | load_sp0(t, thread); |
695 | set_tss_desc(cpu,t); | 713 | set_tss_desc(cpu, t); |
696 | load_TR_desc(); | 714 | load_TR_desc(); |
697 | load_LDT(&init_mm.context); | 715 | load_LDT(&init_mm.context); |
698 | 716 | ||
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index e0b38c33d842..783691b2a738 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
@@ -14,6 +14,7 @@ struct cpu_dev { | |||
14 | 14 | ||
15 | struct cpu_model_info c_models[4]; | 15 | struct cpu_model_info c_models[4]; |
16 | 16 | ||
17 | void (*c_early_init)(struct cpuinfo_x86 *c); | ||
17 | void (*c_init)(struct cpuinfo_x86 * c); | 18 | void (*c_init)(struct cpuinfo_x86 * c); |
18 | void (*c_identify)(struct cpuinfo_x86 * c); | 19 | void (*c_identify)(struct cpuinfo_x86 * c); |
19 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); | 20 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); |
@@ -21,18 +22,17 @@ struct cpu_dev { | |||
21 | 22 | ||
22 | extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM]; | 23 | extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM]; |
23 | 24 | ||
25 | struct cpu_vendor_dev { | ||
26 | int vendor; | ||
27 | struct cpu_dev *cpu_dev; | ||
28 | }; | ||
29 | |||
30 | #define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \ | ||
31 | static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \ | ||
32 | __attribute__((__section__(".x86cpuvendor.init"))) = \ | ||
33 | { cpu_vendor_id, cpu_dev } | ||
34 | |||
35 | extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[]; | ||
36 | |||
24 | extern int get_model_name(struct cpuinfo_x86 *c); | 37 | extern int get_model_name(struct cpuinfo_x86 *c); |
25 | extern void display_cacheinfo(struct cpuinfo_x86 *c); | 38 | extern void display_cacheinfo(struct cpuinfo_x86 *c); |
26 | |||
27 | extern void early_init_intel(struct cpuinfo_x86 *c); | ||
28 | extern void early_init_amd(struct cpuinfo_x86 *c); | ||
29 | |||
30 | /* Specific CPU type init functions */ | ||
31 | int intel_cpu_init(void); | ||
32 | int amd_init_cpu(void); | ||
33 | int cyrix_init_cpu(void); | ||
34 | int nsc_init_cpu(void); | ||
35 | int centaur_init_cpu(void); | ||
36 | int transmeta_init_cpu(void); | ||
37 | int nexgen_init_cpu(void); | ||
38 | int umc_init_cpu(void); | ||
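
cpu_vendor_dev_register() is what lets early_cpu_init() (in the common.c hunk above) replace eight hand-written init calls with one loop: each vendor file plants a struct cpu_vendor_dev in the .x86cpuvendor.init section, and the kernel linker script is expected to provide __x86cpuvendor_start/_end around it. The same trick works in plain userspace C, since GNU ld synthesizes __start_/__stop_ symbols for any section whose name is a valid C identifier; a minimal sketch (the section name "vendors" is illustrative):

#include <stdio.h>

struct vendor_dev {
	int vendor;
	const char *name;
};

/* plant one entry in a named section, like cpu_vendor_dev_register() */
#define vendor_register(id, vname)				\
	static struct vendor_dev __vendor_##id			\
	__attribute__((used, section("vendors"))) = { id, vname }

vendor_register(0, "Intel");
vendor_register(1, "AMD");

/* GNU ld provides these automatically for C-identifier section names */
extern struct vendor_dev __start_vendors[], __stop_vendors[];

int main(void)
{
	struct vendor_dev *v;

	for (v = __start_vendors; v < __stop_vendors; v++)	/* cf. early_cpu_init() */
		printf("vendor %d: %s\n", v->vendor, v->name);
	return 0;
}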
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 7139b0262703..3fd7a67bb06a 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -19,7 +19,7 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
19 | { | 19 | { |
20 | unsigned char ccr2, ccr3; | 20 | unsigned char ccr2, ccr3; |
21 | unsigned long flags; | 21 | unsigned long flags; |
22 | 22 | ||
23 | /* we test for DEVID by checking whether CCR3 is writable */ | 23 | /* we test for DEVID by checking whether CCR3 is writable */ |
24 | local_irq_save(flags); | 24 | local_irq_save(flags); |
25 | ccr3 = getCx86(CX86_CCR3); | 25 | ccr3 = getCx86(CX86_CCR3); |
@@ -37,8 +37,7 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
37 | setCx86(CX86_CCR2, ccr2); | 37 | setCx86(CX86_CCR2, ccr2); |
38 | *dir0 = 0xfe; | 38 | *dir0 = 0xfe; |
39 | } | 39 | } |
40 | } | 40 | } else { |
41 | else { | ||
42 | setCx86(CX86_CCR3, ccr3); /* restore CCR3 */ | 41 | setCx86(CX86_CCR3, ccr3); /* restore CCR3 */ |
43 | 42 | ||
44 | /* read DIR0 and DIR1 CPU registers */ | 43 | /* read DIR0 and DIR1 CPU registers */ |
@@ -86,7 +85,7 @@ static char cyrix_model_mult2[] __cpuinitdata = "12233445"; | |||
86 | static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) | 85 | static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) |
87 | { | 86 | { |
88 | unsigned long flags; | 87 | unsigned long flags; |
89 | 88 | ||
90 | if (Cx86_dir0_msb == 3) { | 89 | if (Cx86_dir0_msb == 3) { |
91 | unsigned char ccr3, ccr5; | 90 | unsigned char ccr3, ccr5; |
92 | 91 | ||
@@ -132,7 +131,7 @@ static void __cpuinit set_cx86_memwb(void) | |||
132 | /* set 'Not Write-through' */ | 131 | /* set 'Not Write-through' */ |
133 | write_cr0(read_cr0() | X86_CR0_NW); | 132 | write_cr0(read_cr0() | X86_CR0_NW); |
134 | /* CCR2 bit 2: lock NW bit and set WT1 */ | 133 | /* CCR2 bit 2: lock NW bit and set WT1 */ |
135 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 ); | 134 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); |
136 | } | 135 | } |
137 | 136 | ||
138 | static void __cpuinit set_cx86_inc(void) | 137 | static void __cpuinit set_cx86_inc(void) |
@@ -148,7 +147,7 @@ static void __cpuinit set_cx86_inc(void) | |||
148 | setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02); | 147 | setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02); |
149 | /* PCR0 -- Performance Control */ | 148 | /* PCR0 -- Performance Control */ |
150 | /* Incrementor Margin 10 */ | 149 | /* Incrementor Margin 10 */ |
151 | setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); | 150 | setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); |
152 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 151 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
153 | } | 152 | } |
154 | 153 | ||
@@ -167,16 +166,16 @@ static void __cpuinit geode_configure(void) | |||
167 | 166 | ||
168 | ccr3 = getCx86(CX86_CCR3); | 167 | ccr3 = getCx86(CX86_CCR3); |
169 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 168 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
170 | 169 | ||
171 | 170 | ||
172 | /* FPU fast, DTE cache, Mem bypass */ | 171 | /* FPU fast, DTE cache, Mem bypass */ |
173 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); | 172 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); |
174 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 173 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
175 | 174 | ||
176 | set_cx86_memwb(); | 175 | set_cx86_memwb(); |
177 | set_cx86_reorder(); | 176 | set_cx86_reorder(); |
178 | set_cx86_inc(); | 177 | set_cx86_inc(); |
179 | 178 | ||
180 | local_irq_restore(flags); | 179 | local_irq_restore(flags); |
181 | } | 180 | } |
182 | 181 | ||
@@ -187,14 +186,16 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
187 | char *buf = c->x86_model_id; | 186 | char *buf = c->x86_model_id; |
188 | const char *p = NULL; | 187 | const char *p = NULL; |
189 | 188 | ||
190 | /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; | 189 | /* |
191 | 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ | 190 | * Bit 31 in normal CPUID used for nonstandard 3DNow ID; |
192 | clear_bit(0*32+31, c->x86_capability); | 191 | * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway |
192 | */ | ||
193 | clear_cpu_cap(c, 0*32+31); | ||
193 | 194 | ||
194 | /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */ | 195 | /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */ |
195 | if ( test_bit(1*32+24, c->x86_capability) ) { | 196 | if (test_cpu_cap(c, 1*32+24)) { |
196 | clear_bit(1*32+24, c->x86_capability); | 197 | clear_cpu_cap(c, 1*32+24); |
197 | set_bit(X86_FEATURE_CXMMX, c->x86_capability); | 198 | set_cpu_cap(c, X86_FEATURE_CXMMX); |
198 | } | 199 | } |
199 | 200 | ||
200 | do_cyrix_devid(&dir0, &dir1); | 201 | do_cyrix_devid(&dir0, &dir1); |
@@ -213,7 +214,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
213 | * the model, multiplier and stepping. Black magic included, | 214 | * the model, multiplier and stepping. Black magic included, |
214 | * to make the silicon step/rev numbers match the printed ones. | 215 | * to make the silicon step/rev numbers match the printed ones. |
215 | */ | 216 | */ |
216 | 217 | ||
217 | switch (dir0_msn) { | 218 | switch (dir0_msn) { |
218 | unsigned char tmp; | 219 | unsigned char tmp; |
219 | 220 | ||
@@ -241,7 +242,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
241 | } else /* 686 */ | 242 | } else /* 686 */ |
242 | p = Cx86_cb+1; | 243 | p = Cx86_cb+1; |
243 | /* Emulate MTRRs using Cyrix's ARRs. */ | 244 | /* Emulate MTRRs using Cyrix's ARRs. */ |
244 | set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability); | 245 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); |
245 | /* 6x86's contain this bug */ | 246 | /* 6x86's contain this bug */ |
246 | c->coma_bug = 1; | 247 | c->coma_bug = 1; |
247 | break; | 248 | break; |
@@ -250,17 +251,18 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
250 | #ifdef CONFIG_PCI | 251 | #ifdef CONFIG_PCI |
251 | { | 252 | { |
252 | u32 vendor, device; | 253 | u32 vendor, device; |
253 | /* It isn't really a PCI quirk directly, but the cure is the | 254 | /* |
254 | same. The MediaGX has deep magic SMM stuff that handles the | 255 | * It isn't really a PCI quirk directly, but the cure is the |
255 | SB emulation. It throws away the fifo on disable_dma() which | 256 | * same. The MediaGX has deep magic SMM stuff that handles the |
256 | is wrong and ruins the audio. | 257 | * SB emulation. It throws away the fifo on disable_dma() which |
257 | 258 | * is wrong and ruins the audio. | |
258 | Bug2: VSA1 has a wrap bug so that using maximum sized DMA | 259 | * |
259 | causes bad things. According to NatSemi VSA2 has another | 260 | * Bug2: VSA1 has a wrap bug so that using maximum sized DMA |
260 | bug to do with 'hlt'. I've not seen any boards using VSA2 | 261 | * causes bad things. According to NatSemi VSA2 has another |
261 | and X doesn't seem to support it either so who cares 8). | 262 | * bug to do with 'hlt'. I've not seen any boards using VSA2 |
262 | VSA1 we work around however. | 263 | * and X doesn't seem to support it either so who cares 8). |
263 | */ | 264 | * VSA1 we work around however. |
265 | */ | ||
264 | 266 | ||
265 | printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n"); | 267 | printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n"); |
266 | isa_dma_bridge_buggy = 2; | 268 | isa_dma_bridge_buggy = 2; |
@@ -273,55 +275,51 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
273 | 275 | ||
274 | /* | 276 | /* |
275 | * The 5510/5520 companion chips have a funky PIT. | 277 | * The 5510/5520 companion chips have a funky PIT. |
276 | */ | 278 | */ |
277 | if (vendor == PCI_VENDOR_ID_CYRIX && | 279 | if (vendor == PCI_VENDOR_ID_CYRIX && |
278 | (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520)) | 280 | (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520)) |
279 | mark_tsc_unstable("cyrix 5510/5520 detected"); | 281 | mark_tsc_unstable("cyrix 5510/5520 detected"); |
280 | } | 282 | } |
281 | #endif | 283 | #endif |
282 | c->x86_cache_size=16; /* Yep 16K integrated cache thats it */ | 284 | c->x86_cache_size = 16; /* Yep 16K integrated cache, that's it */ |
283 | 285 | ||
284 | /* GXm supports extended cpuid levels 'ala' AMD */ | 286 | /* GXm supports extended cpuid levels 'ala' AMD */ |
285 | if (c->cpuid_level == 2) { | 287 | if (c->cpuid_level == 2) { |
286 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ | 288 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ |
287 | setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); | 289 | setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); |
288 | 290 | ||
289 | /* | 291 | /* |
290 | * GXm : 0x30 ... 0x5f GXm datasheet 51 | 292 | * GXm : 0x30 ... 0x5f GXm datasheet 51 |
291 | * GXlv: 0x6x GXlv datasheet 54 | 293 | * GXlv: 0x6x GXlv datasheet 54 |
292 | * ? : 0x7x | 294 | * ? : 0x7x |
293 | * GX1 : 0x8x GX1 datasheet 56 | 295 | * GX1 : 0x8x GX1 datasheet 56 |
294 | */ | 296 | */ |
295 | if((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <=dir1 && dir1 <= 0x8f)) | 297 | if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) |
296 | geode_configure(); | 298 | geode_configure(); |
297 | get_model_name(c); /* get CPU marketing name */ | 299 | get_model_name(c); /* get CPU marketing name */ |
298 | return; | 300 | return; |
299 | } | 301 | } else { /* MediaGX */ |
300 | else { /* MediaGX */ | ||
301 | Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4'; | 302 | Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4'; |
302 | p = Cx86_cb+2; | 303 | p = Cx86_cb+2; |
303 | c->x86_model = (dir1 & 0x20) ? 1 : 2; | 304 | c->x86_model = (dir1 & 0x20) ? 1 : 2; |
304 | } | 305 | } |
305 | break; | 306 | break; |
306 | 307 | ||
307 | case 5: /* 6x86MX/M II */ | 308 | case 5: /* 6x86MX/M II */ |
308 | if (dir1 > 7) | 309 | if (dir1 > 7) { |
309 | { | ||
310 | dir0_msn++; /* M II */ | 310 | dir0_msn++; /* M II */ |
311 | /* Enable MMX extensions (App note 108) */ | 311 | /* Enable MMX extensions (App note 108) */ |
312 | setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); | 312 | setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); |
313 | } | 313 | } else { |
314 | else | ||
315 | { | ||
316 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ | 314 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ |
317 | } | 315 | } |
318 | tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; | 316 | tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; |
319 | Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; | 317 | Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; |
320 | p = Cx86_cb+tmp; | 318 | p = Cx86_cb+tmp; |
321 | if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20)) | 319 | if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20)) |
322 | (c->x86_model)++; | 320 | (c->x86_model)++; |
323 | /* Emulate MTRRs using Cyrix's ARRs. */ | 321 | /* Emulate MTRRs using Cyrix's ARRs. */ |
324 | set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability); | 322 | set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); |
325 | break; | 323 | break; |
326 | 324 | ||
327 | case 0xf: /* Cyrix 486 without DEVID registers */ | 325 | case 0xf: /* Cyrix 486 without DEVID registers */ |
@@ -343,7 +341,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
343 | break; | 341 | break; |
344 | } | 342 | } |
345 | strcpy(buf, Cx86_model[dir0_msn & 7]); | 343 | strcpy(buf, Cx86_model[dir0_msn & 7]); |
346 | if (p) strcat(buf, p); | 344 | if (p) |
345 | strcat(buf, p); | ||
347 | return; | 346 | return; |
348 | } | 347 | } |
349 | 348 | ||
@@ -352,7 +351,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
352 | */ | 351 | */ |
353 | static void __cpuinit init_nsc(struct cpuinfo_x86 *c) | 352 | static void __cpuinit init_nsc(struct cpuinfo_x86 *c) |
354 | { | 353 | { |
355 | /* There may be GX1 processors in the wild that are branded | 354 | /* |
355 | * There may be GX1 processors in the wild that are branded | ||
356 | * NSC and not Cyrix. | 356 | * NSC and not Cyrix. |
357 | * | 357 | * |
358 | * This function only handles the GX processor, and kicks every | 358 | * This function only handles the GX processor, and kicks every |
@@ -377,7 +377,7 @@ static void __cpuinit init_nsc(struct cpuinfo_x86 *c) | |||
377 | * by the fact that they preserve the flags across the division of 5/2. | 377 | * by the fact that they preserve the flags across the division of 5/2. |
378 | * PII and PPro exhibit this behavior too, but they have cpuid available. | 378 | * PII and PPro exhibit this behavior too, but they have cpuid available. |
379 | */ | 379 | */ |
380 | 380 | ||
381 | /* | 381 | /* |
382 | * Perform the Cyrix 5/2 test. A Cyrix won't change | 382 | * Perform the Cyrix 5/2 test. A Cyrix won't change |
383 | * the flags, while other 486 chips will. | 383 | * the flags, while other 486 chips will. |
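
The comment describes the classic DEVID-less probe: Cyrix 486-class parts leave the arithmetic flags untouched across an integer divide of 5 by 2, while Intel-style 486s clobber them. Only the tail of test_cyrix_52div() is visible in the next hunk; a reconstruction of the whole helper, believed close to what the file actually contains (x86-32 inline asm):

static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* load known flags from %ah (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* read the flags back into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide */
	return (unsigned char) (test >> 8) == 0x02;
}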
@@ -398,27 +398,26 @@ static inline int test_cyrix_52div(void) | |||
398 | return (unsigned char) (test >> 8) == 0x02; | 398 | return (unsigned char) (test >> 8) == 0x02; |
399 | } | 399 | } |
400 | 400 | ||
401 | static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c) | 401 | static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) |
402 | { | 402 | { |
403 | /* Detect Cyrix with disabled CPUID */ | 403 | /* Detect Cyrix with disabled CPUID */ |
404 | if ( c->x86 == 4 && test_cyrix_52div() ) { | 404 | if (c->x86 == 4 && test_cyrix_52div()) { |
405 | unsigned char dir0, dir1; | 405 | unsigned char dir0, dir1; |
406 | 406 | ||
407 | strcpy(c->x86_vendor_id, "CyrixInstead"); | 407 | strcpy(c->x86_vendor_id, "CyrixInstead"); |
408 | c->x86_vendor = X86_VENDOR_CYRIX; | 408 | c->x86_vendor = X86_VENDOR_CYRIX; |
409 | 409 | ||
410 | /* Actually enable cpuid on the older cyrix */ | 410 | /* Actually enable cpuid on the older cyrix */ |
411 | 411 | ||
412 | /* Retrieve CPU revisions */ | 412 | /* Retrieve CPU revisions */ |
413 | 413 | ||
414 | do_cyrix_devid(&dir0, &dir1); | 414 | do_cyrix_devid(&dir0, &dir1); |
415 | 415 | ||
416 | dir0>>=4; | 416 | dir0 >>= 4; |
417 | 417 | ||
418 | /* Check it is an affected model */ | 418 | /* Check it is an affected model */ |
419 | 419 | ||
420 | if (dir0 == 5 || dir0 == 3) | 420 | if (dir0 == 5 || dir0 == 3) { |
421 | { | ||
422 | unsigned char ccr3; | 421 | unsigned char ccr3; |
423 | unsigned long flags; | 422 | unsigned long flags; |
424 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); | 423 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); |
@@ -434,26 +433,17 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c) | |||
434 | 433 | ||
435 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { | 434 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { |
436 | .c_vendor = "Cyrix", | 435 | .c_vendor = "Cyrix", |
437 | .c_ident = { "CyrixInstead" }, | 436 | .c_ident = { "CyrixInstead" }, |
438 | .c_init = init_cyrix, | 437 | .c_init = init_cyrix, |
439 | .c_identify = cyrix_identify, | 438 | .c_identify = cyrix_identify, |
440 | }; | 439 | }; |
441 | 440 | ||
442 | int __init cyrix_init_cpu(void) | 441 | cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev); |
443 | { | ||
444 | cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev; | ||
445 | return 0; | ||
446 | } | ||
447 | 442 | ||
448 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { | 443 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { |
449 | .c_vendor = "NSC", | 444 | .c_vendor = "NSC", |
450 | .c_ident = { "Geode by NSC" }, | 445 | .c_ident = { "Geode by NSC" }, |
451 | .c_init = init_nsc, | 446 | .c_init = init_nsc, |
452 | }; | 447 | }; |
453 | 448 | ||
454 | int __init nsc_init_cpu(void) | 449 | cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev); |
455 | { | ||
456 | cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev; | ||
457 | return 0; | ||
458 | } | ||
459 | |||
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c index ee975ac6bbcb..e43ad4ad4cba 100644 --- a/arch/x86/kernel/cpu/feature_names.c +++ b/arch/x86/kernel/cpu/feature_names.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * This file must not contain any executable code. | 4 | * This file must not contain any executable code. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include "asm/cpufeature.h" | 7 | #include <asm/cpufeature.h> |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * These flag bits must match the definitions in <asm/cpufeature.h>. | 10 | * These flag bits must match the definitions in <asm/cpufeature.h>. |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index fae31ce747bd..fe9224c51d37 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -30,7 +30,7 @@ | |||
30 | struct movsl_mask movsl_mask __read_mostly; | 30 | struct movsl_mask movsl_mask __read_mostly; |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 33 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
34 | { | 34 | { |
35 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ | 35 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ |
36 | if (c->x86 == 15 && c->x86_cache_alignment == 64) | 36 | if (c->x86 == 15 && c->x86_cache_alignment == 64) |
@@ -45,7 +45,7 @@ void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
45 | * | 45 | * |
46 | * This is called before we do cpu ident work | 46 | * This is called before we do cpu ident work |
47 | */ | 47 | */ |
48 | 48 | ||
49 | int __cpuinit ppro_with_ram_bug(void) | 49 | int __cpuinit ppro_with_ram_bug(void) |
50 | { | 50 | { |
51 | /* Uses data from early_cpu_detect now */ | 51 | /* Uses data from early_cpu_detect now */ |
@@ -58,7 +58,7 @@ int __cpuinit ppro_with_ram_bug(void) | |||
58 | } | 58 | } |
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
61 | 61 | ||
62 | 62 | ||
63 | /* | 63 | /* |
64 | * P4 Xeon errata 037 workaround. | 64 | * P4 Xeon errata 037 workaround. |
@@ -69,7 +69,7 @@ static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) | |||
69 | unsigned long lo, hi; | 69 | unsigned long lo, hi; |
70 | 70 | ||
71 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { | 71 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { |
72 | rdmsr (MSR_IA32_MISC_ENABLE, lo, hi); | 72 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
73 | if ((lo & (1<<9)) == 0) { | 73 | if ((lo & (1<<9)) == 0) { |
74 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); | 74 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); |
75 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); | 75 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); |
@@ -127,10 +127,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
127 | */ | 127 | */ |
128 | c->f00f_bug = 0; | 128 | c->f00f_bug = 0; |
129 | if (!paravirt_enabled() && c->x86 == 5) { | 129 | if (!paravirt_enabled() && c->x86 == 5) { |
130 | static int f00f_workaround_enabled = 0; | 130 | static int f00f_workaround_enabled; |
131 | 131 | ||
132 | c->f00f_bug = 1; | 132 | c->f00f_bug = 1; |
133 | if ( !f00f_workaround_enabled ) { | 133 | if (!f00f_workaround_enabled) { |
134 | trap_init_f00f_bug(); | 134 | trap_init_f00f_bug(); |
135 | printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); | 135 | printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); |
136 | f00f_workaround_enabled = 1; | 136 | f00f_workaround_enabled = 1; |
@@ -139,20 +139,22 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | l2 = init_intel_cacheinfo(c); | 141 | l2 = init_intel_cacheinfo(c); |
142 | if (c->cpuid_level > 9 ) { | 142 | if (c->cpuid_level > 9) { |
143 | unsigned eax = cpuid_eax(10); | 143 | unsigned eax = cpuid_eax(10); |
144 | /* Check for version and the number of counters */ | 144 | /* Check for version and the number of counters */ |
145 | if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) | 145 | if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) |
146 | set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability); | 146 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); |
147 | } | 147 | } |
148 | 148 | ||
149 | /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ | 149 | /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ |
150 | if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) | 150 | if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) |
151 | clear_bit(X86_FEATURE_SEP, c->x86_capability); | 151 | clear_cpu_cap(c, X86_FEATURE_SEP); |
152 | 152 | ||
153 | /* Names for the Pentium II/Celeron processors | 153 | /* |
154 | detectable only by also checking the cache size. | 154 | * Names for the Pentium II/Celeron processors |
155 | Dixon is NOT a Celeron. */ | 155 | * detectable only by also checking the cache size. |
156 | * Dixon is NOT a Celeron. | ||
157 | */ | ||
156 | if (c->x86 == 6) { | 158 | if (c->x86 == 6) { |
157 | switch (c->x86_model) { | 159 | switch (c->x86_model) { |
158 | case 5: | 160 | case 5: |
@@ -163,14 +165,14 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
163 | p = "Mobile Pentium II (Dixon)"; | 165 | p = "Mobile Pentium II (Dixon)"; |
164 | } | 166 | } |
165 | break; | 167 | break; |
166 | 168 | ||
167 | case 6: | 169 | case 6: |
168 | if (l2 == 128) | 170 | if (l2 == 128) |
169 | p = "Celeron (Mendocino)"; | 171 | p = "Celeron (Mendocino)"; |
170 | else if (c->x86_mask == 0 || c->x86_mask == 5) | 172 | else if (c->x86_mask == 0 || c->x86_mask == 5) |
171 | p = "Celeron-A"; | 173 | p = "Celeron-A"; |
172 | break; | 174 | break; |
173 | 175 | ||
174 | case 8: | 176 | case 8: |
175 | if (l2 == 128) | 177 | if (l2 == 128) |
176 | p = "Celeron (Coppermine)"; | 178 | p = "Celeron (Coppermine)"; |
@@ -178,9 +180,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
178 | } | 180 | } |
179 | } | 181 | } |
180 | 182 | ||
181 | if ( p ) | 183 | if (p) |
182 | strcpy(c->x86_model_id, p); | 184 | strcpy(c->x86_model_id, p); |
183 | 185 | ||
184 | c->x86_max_cores = num_cpu_cores(c); | 186 | c->x86_max_cores = num_cpu_cores(c); |
185 | 187 | ||
186 | detect_ht(c); | 188 | detect_ht(c); |
@@ -207,28 +209,29 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
207 | #endif | 209 | #endif |
208 | 210 | ||
209 | if (cpu_has_xmm2) | 211 | if (cpu_has_xmm2) |
210 | set_bit(X86_FEATURE_LFENCE_RDTSC, c->x86_capability); | 212 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); |
211 | if (c->x86 == 15) { | 213 | if (c->x86 == 15) { |
212 | set_bit(X86_FEATURE_P4, c->x86_capability); | 214 | set_cpu_cap(c, X86_FEATURE_P4); |
213 | } | 215 | } |
214 | if (c->x86 == 6) | 216 | if (c->x86 == 6) |
215 | set_bit(X86_FEATURE_P3, c->x86_capability); | 217 | set_cpu_cap(c, X86_FEATURE_P3); |
216 | if (cpu_has_ds) { | 218 | if (cpu_has_ds) { |
217 | unsigned int l1; | 219 | unsigned int l1; |
218 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | 220 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); |
219 | if (!(l1 & (1<<11))) | 221 | if (!(l1 & (1<<11))) |
220 | set_bit(X86_FEATURE_BTS, c->x86_capability); | 222 | set_cpu_cap(c, X86_FEATURE_BTS); |
221 | if (!(l1 & (1<<12))) | 223 | if (!(l1 & (1<<12))) |
222 | set_bit(X86_FEATURE_PEBS, c->x86_capability); | 224 | set_cpu_cap(c, X86_FEATURE_PEBS); |
223 | } | 225 | } |
224 | 226 | ||
225 | if (cpu_has_bts) | 227 | if (cpu_has_bts) |
226 | ds_init_intel(c); | 228 | ds_init_intel(c); |
227 | } | 229 | } |
228 | 230 | ||
229 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) | 231 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
230 | { | 232 | { |
231 | /* Intel PIII Tualatin. This comes in two flavours. | 233 | /* |
234 | * Intel PIII Tualatin. This comes in two flavours. | ||
232 | * One has 256kb of cache, the other 512. We have no way | 235 | * One has 256kb of cache, the other 512. We have no way |
233 | * to determine which, so we use a boottime override | 236 | * to determine which, so we use a boottime override |
234 | * for the 512kb model, and assume 256 otherwise. | 237 | * for the 512kb model, and assume 256 otherwise. |
@@ -240,42 +243,42 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned | |||
240 | 243 | ||
241 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { | 244 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { |
242 | .c_vendor = "Intel", | 245 | .c_vendor = "Intel", |
243 | .c_ident = { "GenuineIntel" }, | 246 | .c_ident = { "GenuineIntel" }, |
244 | .c_models = { | 247 | .c_models = { |
245 | { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = | 248 | { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = |
246 | { | 249 | { |
247 | [0] = "486 DX-25/33", | 250 | [0] = "486 DX-25/33", |
248 | [1] = "486 DX-50", | 251 | [1] = "486 DX-50", |
249 | [2] = "486 SX", | 252 | [2] = "486 SX", |
250 | [3] = "486 DX/2", | 253 | [3] = "486 DX/2", |
251 | [4] = "486 SL", | 254 | [4] = "486 SL", |
252 | [5] = "486 SX/2", | 255 | [5] = "486 SX/2", |
253 | [7] = "486 DX/2-WB", | 256 | [7] = "486 DX/2-WB", |
254 | [8] = "486 DX/4", | 257 | [8] = "486 DX/4", |
255 | [9] = "486 DX/4-WB" | 258 | [9] = "486 DX/4-WB" |
256 | } | 259 | } |
257 | }, | 260 | }, |
258 | { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = | 261 | { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = |
259 | { | 262 | { |
260 | [0] = "Pentium 60/66 A-step", | 263 | [0] = "Pentium 60/66 A-step", |
261 | [1] = "Pentium 60/66", | 264 | [1] = "Pentium 60/66", |
262 | [2] = "Pentium 75 - 200", | 265 | [2] = "Pentium 75 - 200", |
263 | [3] = "OverDrive PODP5V83", | 266 | [3] = "OverDrive PODP5V83", |
264 | [4] = "Pentium MMX", | 267 | [4] = "Pentium MMX", |
265 | [7] = "Mobile Pentium 75 - 200", | 268 | [7] = "Mobile Pentium 75 - 200", |
266 | [8] = "Mobile Pentium MMX" | 269 | [8] = "Mobile Pentium MMX" |
267 | } | 270 | } |
268 | }, | 271 | }, |
269 | { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = | 272 | { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = |
270 | { | 273 | { |
271 | [0] = "Pentium Pro A-step", | 274 | [0] = "Pentium Pro A-step", |
272 | [1] = "Pentium Pro", | 275 | [1] = "Pentium Pro", |
273 | [3] = "Pentium II (Klamath)", | 276 | [3] = "Pentium II (Klamath)", |
274 | [4] = "Pentium II (Deschutes)", | 277 | [4] = "Pentium II (Deschutes)", |
275 | [5] = "Pentium II (Deschutes)", | 278 | [5] = "Pentium II (Deschutes)", |
276 | [6] = "Mobile Pentium II", | 279 | [6] = "Mobile Pentium II", |
277 | [7] = "Pentium III (Katmai)", | 280 | [7] = "Pentium III (Katmai)", |
278 | [8] = "Pentium III (Coppermine)", | 281 | [8] = "Pentium III (Coppermine)", |
279 | [10] = "Pentium III (Cascades)", | 282 | [10] = "Pentium III (Cascades)", |
280 | [11] = "Pentium III (Tualatin)", | 283 | [11] = "Pentium III (Tualatin)", |
281 | } | 284 | } |
@@ -290,15 +293,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = { | |||
290 | } | 293 | } |
291 | }, | 294 | }, |
292 | }, | 295 | }, |
296 | .c_early_init = early_init_intel, | ||
293 | .c_init = init_intel, | 297 | .c_init = init_intel, |
294 | .c_size_cache = intel_size_cache, | 298 | .c_size_cache = intel_size_cache, |
295 | }; | 299 | }; |
296 | 300 | ||
297 | __init int intel_cpu_init(void) | 301 | cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); |
298 | { | ||
299 | cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev; | ||
300 | return 0; | ||
301 | } | ||
302 | 302 | ||
303 | #ifndef CONFIG_X86_CMPXCHG | 303 | #ifndef CONFIG_X86_CMPXCHG |
304 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | 304 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) |
@@ -364,5 +364,5 @@ unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | |||
364 | EXPORT_SYMBOL(cmpxchg_486_u64); | 364 | EXPORT_SYMBOL(cmpxchg_486_u64); |
365 | #endif | 365 | #endif |
366 | 366 | ||
367 | // arch_initcall(intel_cpu_init); | 367 | /* arch_initcall(intel_cpu_init); */ |
368 | 368 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c index a5182dcd94ae..774d87cfd8cd 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_32.c +++ b/arch/x86/kernel/cpu/mcheck/mce_32.c | |||
@@ -10,20 +10,20 @@ | |||
10 | #include <linux/smp.h> | 10 | #include <linux/smp.h> |
11 | #include <linux/thread_info.h> | 11 | #include <linux/thread_info.h> |
12 | 12 | ||
13 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
14 | #include <asm/system.h> | 14 | #include <asm/system.h> |
15 | #include <asm/mce.h> | 15 | #include <asm/mce.h> |
16 | 16 | ||
17 | #include "mce.h" | 17 | #include "mce.h" |
18 | 18 | ||
19 | int mce_disabled = 0; | 19 | int mce_disabled; |
20 | int nr_mce_banks; | 20 | int nr_mce_banks; |
21 | 21 | ||
22 | EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */ | 22 | EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */ |
23 | 23 | ||
24 | /* Handle unconfigured int18 (should never happen) */ | 24 | /* Handle unconfigured int18 (should never happen) */ |
25 | static void unexpected_machine_check(struct pt_regs * regs, long error_code) | 25 | static void unexpected_machine_check(struct pt_regs *regs, long error_code) |
26 | { | 26 | { |
27 | printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id()); | 27 | printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id()); |
28 | } | 28 | } |
29 | 29 | ||
@@ -33,30 +33,30 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_mac | |||
33 | /* This has to be run for each processor */ | 33 | /* This has to be run for each processor */ |
34 | void mcheck_init(struct cpuinfo_x86 *c) | 34 | void mcheck_init(struct cpuinfo_x86 *c) |
35 | { | 35 | { |
36 | if (mce_disabled==1) | 36 | if (mce_disabled == 1) |
37 | return; | 37 | return; |
38 | 38 | ||
39 | switch (c->x86_vendor) { | 39 | switch (c->x86_vendor) { |
40 | case X86_VENDOR_AMD: | 40 | case X86_VENDOR_AMD: |
41 | amd_mcheck_init(c); | 41 | amd_mcheck_init(c); |
42 | break; | 42 | break; |
43 | 43 | ||
44 | case X86_VENDOR_INTEL: | 44 | case X86_VENDOR_INTEL: |
45 | if (c->x86==5) | 45 | if (c->x86 == 5) |
46 | intel_p5_mcheck_init(c); | 46 | intel_p5_mcheck_init(c); |
47 | if (c->x86==6) | 47 | if (c->x86 == 6) |
48 | intel_p6_mcheck_init(c); | 48 | intel_p6_mcheck_init(c); |
49 | if (c->x86==15) | 49 | if (c->x86 == 15) |
50 | intel_p4_mcheck_init(c); | 50 | intel_p4_mcheck_init(c); |
51 | break; | 51 | break; |
52 | 52 | ||
53 | case X86_VENDOR_CENTAUR: | 53 | case X86_VENDOR_CENTAUR: |
54 | if (c->x86==5) | 54 | if (c->x86 == 5) |
55 | winchip_mcheck_init(c); | 55 | winchip_mcheck_init(c); |
56 | break; | 56 | break; |
57 | 57 | ||
58 | default: | 58 | default: |
59 | break; | 59 | break; |
60 | } | 60 | } |
61 | } | 61 | } |
62 | 62 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c index bf39409b3838..00ccb6c14ec2 100644 --- a/arch/x86/kernel/cpu/mcheck/non-fatal.c +++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | 18 | ||
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
21 | #include <asm/msr.h> | 21 | #include <asm/msr.h> |
22 | 22 | ||
@@ -26,23 +26,26 @@ static int firstbank; | |||
26 | 26 | ||
27 | #define MCE_RATE 15*HZ /* timer rate is 15s */ | 27 | #define MCE_RATE 15*HZ /* timer rate is 15s */ |
28 | 28 | ||
29 | static void mce_checkregs (void *info) | 29 | static void mce_checkregs(void *info) |
30 | { | 30 | { |
31 | u32 low, high; | 31 | u32 low, high; |
32 | int i; | 32 | int i; |
33 | 33 | ||
34 | for (i=firstbank; i<nr_mce_banks; i++) { | 34 | for (i = firstbank; i < nr_mce_banks; i++) { |
35 | rdmsr (MSR_IA32_MC0_STATUS+i*4, low, high); | 35 | rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high); |
36 | 36 | ||
37 | if (high & (1<<31)) { | 37 | if (high & (1<<31)) { |
38 | printk(KERN_INFO "MCE: The hardware reports a non " | 38 | printk(KERN_INFO "MCE: The hardware reports a non " |
39 | "fatal, correctable incident occurred on " | 39 | "fatal, correctable incident occurred on " |
40 | "CPU %d.\n", | 40 | "CPU %d.\n", |
41 | smp_processor_id()); | 41 | smp_processor_id()); |
42 | printk (KERN_INFO "Bank %d: %08x%08x\n", i, high, low); | 42 | printk(KERN_INFO "Bank %d: %08x%08x\n", i, high, low); |
43 | 43 | ||
44 | /* Scrub the error so we don't pick it up in MCE_RATE seconds time. */ | 44 | /* |
45 | wrmsr (MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL); | 45 | * Scrub the error so we don't pick it up in MCE_RATE |
46 | * seconds time. | ||
47 | */ | ||
48 | wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL); | ||
46 | 49 | ||
47 | /* Serialize */ | 50 | /* Serialize */ |
48 | wmb(); | 51 | wmb(); |
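
mce_checkregs() reads each bank's 64-bit IA32_MCi_STATUS as a lo/hi pair, so the high & (1<<31) test above is really MCI_STATUS_VAL, bit 63 of the full register, and the wrmsr of zeros scrubs the bank once the event has been logged. The same decode on an assembled 64-bit value:

#include <stdint.h>
#include <stdio.h>

#define MCI_STATUS_VAL (1ULL << 63)	/* "valid error" = bit 31 of the high half */

int main(void)
{
	uint32_t low = 0xdeadbeef, high = 0x80000000;	/* example rdmsr pair */
	uint64_t status = ((uint64_t)high << 32) | low;

	if (status & MCI_STATUS_VAL)
		printf("bank holds a logged error: %016llx\n",
		       (unsigned long long)status);
	return 0;
}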
@@ -55,10 +58,10 @@ static void mce_work_fn(struct work_struct *work); | |||
55 | static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); | 58 | static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); |
56 | 59 | ||
57 | static void mce_work_fn(struct work_struct *work) | 60 | static void mce_work_fn(struct work_struct *work) |
58 | { | 61 | { |
59 | on_each_cpu(mce_checkregs, NULL, 1, 1); | 62 | on_each_cpu(mce_checkregs, NULL, 1, 1); |
60 | schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); | 63 | schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); |
61 | } | 64 | } |
62 | 65 | ||
63 | static int __init init_nonfatal_mce_checker(void) | 66 | static int __init init_nonfatal_mce_checker(void) |
64 | { | 67 | { |
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index a18310aaae0c..bfa5817afdda 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c | |||
@@ -9,20 +9,20 @@ | |||
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/smp.h> | 10 | #include <linux/smp.h> |
11 | 11 | ||
12 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
13 | #include <asm/system.h> | 13 | #include <asm/system.h> |
14 | #include <asm/msr.h> | 14 | #include <asm/msr.h> |
15 | 15 | ||
16 | #include "mce.h" | 16 | #include "mce.h" |
17 | 17 | ||
18 | /* Machine check handler for Pentium class Intel */ | 18 | /* Machine check handler for Pentium class Intel */ |
19 | static void pentium_machine_check(struct pt_regs * regs, long error_code) | 19 | static void pentium_machine_check(struct pt_regs *regs, long error_code) |
20 | { | 20 | { |
21 | u32 loaddr, hi, lotype; | 21 | u32 loaddr, hi, lotype; |
22 | rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); | 22 | rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); |
23 | rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); | 23 | rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); |
24 | printk(KERN_EMERG "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", smp_processor_id(), loaddr, lotype); | 24 | printk(KERN_EMERG "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", smp_processor_id(), loaddr, lotype); |
25 | if(lotype&(1<<5)) | 25 | if (lotype&(1<<5)) |
26 | printk(KERN_EMERG "CPU#%d: Possible thermal failure (CPU on fire ?).\n", smp_processor_id()); | 26 | printk(KERN_EMERG "CPU#%d: Possible thermal failure (CPU on fire ?).\n", smp_processor_id()); |
27 | add_taint(TAINT_MACHINE_CHECK); | 27 | add_taint(TAINT_MACHINE_CHECK); |
28 | } | 28 | } |
@@ -31,13 +31,13 @@ static void pentium_machine_check(struct pt_regs * regs, long error_code) | |||
31 | void intel_p5_mcheck_init(struct cpuinfo_x86 *c) | 31 | void intel_p5_mcheck_init(struct cpuinfo_x86 *c) |
32 | { | 32 | { |
33 | u32 l, h; | 33 | u32 l, h; |
34 | 34 | ||
35 | /* Check for MCE support */ | 35 | /* Check for MCE support */ |
36 | if( !cpu_has(c, X86_FEATURE_MCE) ) | 36 | if (!cpu_has(c, X86_FEATURE_MCE)) |
37 | return; | 37 | return; |
38 | 38 | ||
39 | /* Default P5 to off as it's often misconnected */ | 39 | /* Default P5 to off as it's often misconnected */ |
40 | if(mce_disabled != -1) | 40 | if (mce_disabled != -1) |
41 | return; | 41 | return; |
42 | machine_check_vector = pentium_machine_check; | 42 | machine_check_vector = pentium_machine_check; |
43 | wmb(); | 43 | wmb(); |
@@ -47,7 +47,7 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c) | |||
47 | rdmsr(MSR_IA32_P5_MC_TYPE, l, h); | 47 | rdmsr(MSR_IA32_P5_MC_TYPE, l, h); |
48 | printk(KERN_INFO "Intel old style machine check architecture supported.\n"); | 48 | printk(KERN_INFO "Intel old style machine check architecture supported.\n"); |
49 | 49 | ||
50 | /* Enable MCE */ | 50 | /* Enable MCE */ |
51 | set_in_cr4(X86_CR4_MCE); | 51 | set_in_cr4(X86_CR4_MCE); |
52 | printk(KERN_INFO "Intel old style machine check reporting enabled on CPU#%d.\n", smp_processor_id()); | 52 | printk(KERN_INFO "Intel old style machine check reporting enabled on CPU#%d.\n", smp_processor_id()); |
53 | } | 53 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c index 74342604d30e..62efc9c2b3af 100644 --- a/arch/x86/kernel/cpu/mcheck/p6.c +++ b/arch/x86/kernel/cpu/mcheck/p6.c | |||
@@ -9,23 +9,23 @@ | |||
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/smp.h> | 10 | #include <linux/smp.h> |
11 | 11 | ||
12 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
13 | #include <asm/system.h> | 13 | #include <asm/system.h> |
14 | #include <asm/msr.h> | 14 | #include <asm/msr.h> |
15 | 15 | ||
16 | #include "mce.h" | 16 | #include "mce.h" |
17 | 17 | ||
18 | /* Machine Check Handler For PII/PIII */ | 18 | /* Machine Check Handler For PII/PIII */ |
19 | static void intel_machine_check(struct pt_regs * regs, long error_code) | 19 | static void intel_machine_check(struct pt_regs *regs, long error_code) |
20 | { | 20 | { |
21 | int recover=1; | 21 | int recover = 1; |
22 | u32 alow, ahigh, high, low; | 22 | u32 alow, ahigh, high, low; |
23 | u32 mcgstl, mcgsth; | 23 | u32 mcgstl, mcgsth; |
24 | int i; | 24 | int i; |
25 | 25 | ||
26 | rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth); | 26 | rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); |
27 | if (mcgstl & (1<<0)) /* Recoverable ? */ | 27 | if (mcgstl & (1<<0)) /* Recoverable ? */ |
28 | recover=0; | 28 | recover = 0; |
29 | 29 | ||
30 | printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", | 30 | printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", |
31 | smp_processor_id(), mcgsth, mcgstl); | 31 | smp_processor_id(), mcgsth, mcgstl); |
@@ -55,30 +55,30 @@ static void intel_machine_check(struct pt_regs * regs, long error_code) | |||
55 | } | 55 | } |
56 | 56 | ||
57 | if (recover & 2) | 57 | if (recover & 2) |
58 | panic ("CPU context corrupt"); | 58 | panic("CPU context corrupt"); |
59 | if (recover & 1) | 59 | if (recover & 1) |
60 | panic ("Unable to continue"); | 60 | panic("Unable to continue"); |
61 | 61 | ||
62 | printk (KERN_EMERG "Attempting to continue.\n"); | 62 | printk(KERN_EMERG "Attempting to continue.\n"); |
63 | /* | 63 | /* |
64 | * Do not clear the MSR_IA32_MCi_STATUS if the error is not | 64 | * Do not clear the MSR_IA32_MCi_STATUS if the error is not |
65 | * recoverable/continuable. This will allow BIOS to look at the MSRs | 65 | * recoverable/continuable. This will allow BIOS to look at the MSRs |
66 | * for errors if the OS could not log the error. | 66 | * for errors if the OS could not log the error. |
67 | */ | 67 | */ |
68 | for (i=0; i<nr_mce_banks; i++) { | 68 | for (i = 0; i < nr_mce_banks; i++) { |
69 | unsigned int msr; | 69 | unsigned int msr; |
70 | msr = MSR_IA32_MC0_STATUS+i*4; | 70 | msr = MSR_IA32_MC0_STATUS+i*4; |
71 | rdmsr (msr,low, high); | 71 | rdmsr(msr, low, high); |
72 | if (high & (1<<31)) { | 72 | if (high & (1<<31)) { |
73 | /* Clear it */ | 73 | /* Clear it */ |
74 | wrmsr (msr, 0UL, 0UL); | 74 | wrmsr(msr, 0UL, 0UL); |
75 | /* Serialize */ | 75 | /* Serialize */ |
76 | wmb(); | 76 | wmb(); |
77 | add_taint(TAINT_MACHINE_CHECK); | 77 | add_taint(TAINT_MACHINE_CHECK); |
78 | } | 78 | } |
79 | } | 79 | } |
80 | mcgstl &= ~(1<<2); | 80 | mcgstl &= ~(1<<2); |
81 | wrmsr (MSR_IA32_MCG_STATUS,mcgstl, mcgsth); | 81 | wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth); |
82 | } | 82 | } |
83 | 83 | ||
84 | /* Set up machine check reporting for processors with Intel style MCE */ | 84 | /* Set up machine check reporting for processors with Intel style MCE */ |
@@ -86,21 +86,21 @@ void intel_p6_mcheck_init(struct cpuinfo_x86 *c) | |||
86 | { | 86 | { |
87 | u32 l, h; | 87 | u32 l, h; |
88 | int i; | 88 | int i; |
89 | 89 | ||
90 | /* Check for MCE support */ | 90 | /* Check for MCE support */ |
91 | if (!cpu_has(c, X86_FEATURE_MCE)) | 91 | if (!cpu_has(c, X86_FEATURE_MCE)) |
92 | return; | 92 | return; |
93 | 93 | ||
94 | /* Check for PPro style MCA */ | 94 | /* Check for PPro style MCA */ |
95 | if (!cpu_has(c, X86_FEATURE_MCA)) | 95 | if (!cpu_has(c, X86_FEATURE_MCA)) |
96 | return; | 96 | return; |
97 | 97 | ||
98 | /* Ok machine check is available */ | 98 | /* Ok machine check is available */ |
99 | machine_check_vector = intel_machine_check; | 99 | machine_check_vector = intel_machine_check; |
100 | wmb(); | 100 | wmb(); |
101 | 101 | ||
102 | printk (KERN_INFO "Intel machine check architecture supported.\n"); | 102 | printk(KERN_INFO "Intel machine check architecture supported.\n"); |
103 | rdmsr (MSR_IA32_MCG_CAP, l, h); | 103 | rdmsr(MSR_IA32_MCG_CAP, l, h); |
104 | if (l & (1<<8)) /* Control register present ? */ | 104 | if (l & (1<<8)) /* Control register present ? */ |
105 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); | 105 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); |
106 | nr_mce_banks = l & 0xff; | 106 | nr_mce_banks = l & 0xff; |
@@ -110,13 +110,13 @@ void intel_p6_mcheck_init(struct cpuinfo_x86 *c) | |||
110 | * - MC0_CTL should not be written | 110 | * - MC0_CTL should not be written |
111 | * - Status registers on all banks should be cleared on reset | 111 | * - Status registers on all banks should be cleared on reset |
112 | */ | 112 | */ |
113 | for (i=1; i<nr_mce_banks; i++) | 113 | for (i = 1; i < nr_mce_banks; i++) |
114 | wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); | 114 | wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); |
115 | 115 | ||
116 | for (i=0; i<nr_mce_banks; i++) | 116 | for (i = 0; i < nr_mce_banks; i++) |
117 | wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); | 117 | wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); |
118 | 118 | ||
119 | set_in_cr4 (X86_CR4_MCE); | 119 | set_in_cr4(X86_CR4_MCE); |
120 | printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", | 120 | printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", |
121 | smp_processor_id()); | 121 | smp_processor_id()); |
122 | } | 122 | } |
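
intel_machine_check() keys its recovery decision off IA32_MCG_STATUS: bit 0 (RIPV) set means the interrupted context is restartable, and the final mcgstl &= ~(1<<2) clears MCIP, the machine-check-in-progress flag, so a second #MC won't shut the CPU down. A small sketch naming those bits as the SDM does:

#include <stdio.h>

#define MCG_STATUS_RIPV (1u << 0)	/* restart IP valid */
#define MCG_STATUS_EIPV (1u << 1)	/* error IP valid */
#define MCG_STATUS_MCIP (1u << 2)	/* machine check in progress */

int main(void)
{
	unsigned int mcgstl = MCG_STATUS_EIPV | MCG_STATUS_MCIP;
	int recover = (mcgstl & MCG_STATUS_RIPV) ? 0 : 1;	/* as in the handler */

	printf("recover=%d\n", recover);	/* 1: not restartable */
	mcgstl &= ~MCG_STATUS_MCIP;		/* re-arm for the next #MC */
	printf("mcgstl=%#x\n", mcgstl);
	return 0;
}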
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index 3d428d5afc52..f2be3e190c6b 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c | |||
@@ -8,14 +8,14 @@ | |||
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | 10 | ||
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/system.h> | 12 | #include <asm/system.h> |
13 | #include <asm/msr.h> | 13 | #include <asm/msr.h> |
14 | 14 | ||
15 | #include "mce.h" | 15 | #include "mce.h" |
16 | 16 | ||
17 | /* Machine check handler for WinChip C6 */ | 17 | /* Machine check handler for WinChip C6 */ |
18 | static void winchip_machine_check(struct pt_regs * regs, long error_code) | 18 | static void winchip_machine_check(struct pt_regs *regs, long error_code) |
19 | { | 19 | { |
20 | printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); | 20 | printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); |
21 | add_taint(TAINT_MACHINE_CHECK); | 21 | add_taint(TAINT_MACHINE_CHECK); |
@@ -28,8 +28,8 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c) | |||
28 | machine_check_vector = winchip_machine_check; | 28 | machine_check_vector = winchip_machine_check; |
29 | wmb(); | 29 | wmb(); |
30 | rdmsr(MSR_IDT_FCR1, lo, hi); | 30 | rdmsr(MSR_IDT_FCR1, lo, hi); |
31 | lo|= (1<<2); /* Enable EIERRINT (int 18 MCE) */ | 31 | lo |= (1<<2); /* Enable EIERRINT (int 18 MCE) */ |
32 | lo&= ~(1<<4); /* Enable MCE */ | 32 | lo &= ~(1<<4); /* Enable MCE */ |
33 | wrmsr(MSR_IDT_FCR1, lo, hi); | 33 | wrmsr(MSR_IDT_FCR1, lo, hi); |
34 | set_in_cr4(X86_CR4_MCE); | 34 | set_in_cr4(X86_CR4_MCE); |
35 | printk(KERN_INFO "Winchip machine check reporting enabled on CPU#0.\n"); | 35 | printk(KERN_INFO "Winchip machine check reporting enabled on CPU#0.\n"); |
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 3e18db4cefee..353efe4f5017 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <asm/cpufeature.h> | 11 | #include <asm/cpufeature.h> |
12 | #include <asm/processor-flags.h> | 12 | #include <asm/processor-flags.h> |
13 | #include <asm/tlbflush.h> | 13 | #include <asm/tlbflush.h> |
14 | #include <asm/pat.h> | ||
14 | #include "mtrr.h" | 15 | #include "mtrr.h" |
15 | 16 | ||
16 | struct mtrr_state { | 17 | struct mtrr_state { |
@@ -35,6 +36,8 @@ static struct fixed_range_block fixed_range_blocks[] = { | |||
35 | 36 | ||
36 | static unsigned long smp_changes_mask; | 37 | static unsigned long smp_changes_mask; |
37 | static struct mtrr_state mtrr_state = {}; | 38 | static struct mtrr_state mtrr_state = {}; |
39 | static int mtrr_state_set; | ||
40 | static u64 tom2; | ||
38 | 41 | ||
39 | #undef MODULE_PARAM_PREFIX | 42 | #undef MODULE_PARAM_PREFIX |
40 | #define MODULE_PARAM_PREFIX "mtrr." | 43 | #define MODULE_PARAM_PREFIX "mtrr." |
@@ -42,6 +45,111 @@ static struct mtrr_state mtrr_state = {}; | |||
42 | static int mtrr_show; | 45 | static int mtrr_show; |
43 | module_param_named(show, mtrr_show, bool, 0); | 46 | module_param_named(show, mtrr_show, bool, 0); |
44 | 47 | ||
48 | /* | ||
49 | * Returns the effective MTRR type for the region | ||
50 | * Error returns: | ||
51 | * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR | ||
52 | * - 0xFF - when MTRR is not enabled | ||
53 | */ | ||
54 | u8 mtrr_type_lookup(u64 start, u64 end) | ||
55 | { | ||
56 | int i; | ||
57 | u64 base, mask; | ||
58 | u8 prev_match, curr_match; | ||
59 | |||
60 | if (!mtrr_state_set) | ||
61 | return 0xFF; | ||
62 | |||
63 | if (!mtrr_state.enabled) | ||
64 | return 0xFF; | ||
65 | |||
66 | /* Make end inclusive, instead of exclusive */ | ||
67 | end--; | ||
68 | |||
69 | /* Look in fixed ranges. Just return the type as per start */ | ||
70 | if (mtrr_state.have_fixed && (start < 0x100000)) { | ||
71 | int idx; | ||
72 | |||
73 | if (start < 0x80000) { | ||
74 | idx = 0; | ||
75 | idx += (start >> 16); | ||
76 | return mtrr_state.fixed_ranges[idx]; | ||
77 | } else if (start < 0xC0000) { | ||
78 | idx = 1 * 8; | ||
79 | idx += ((start - 0x80000) >> 14); | ||
80 | return mtrr_state.fixed_ranges[idx]; | ||
81 | } else if (start < 0x1000000) { | ||
82 | idx = 3 * 8; | ||
83 | idx += ((start - 0xC0000) >> 12); | ||
84 | return mtrr_state.fixed_ranges[idx]; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Look in variable ranges | ||
90 | * Look for multiple ranges matching this address and pick type | ||
91 | * as per MTRR precedence | ||
92 | */ | ||
93 | if (!(mtrr_state.enabled & 2)) { | ||
94 | return mtrr_state.def_type; | ||
95 | } | ||
96 | |||
97 | prev_match = 0xFF; | ||
98 | for (i = 0; i < num_var_ranges; ++i) { | ||
99 | unsigned short start_state, end_state; | ||
100 | |||
101 | if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11))) | ||
102 | continue; | ||
103 | |||
104 | base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) + | ||
105 | (mtrr_state.var_ranges[i].base_lo & PAGE_MASK); | ||
106 | mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) + | ||
107 | (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK); | ||
108 | |||
109 | start_state = ((start & mask) == (base & mask)); | ||
110 | end_state = ((end & mask) == (base & mask)); | ||
111 | if (start_state != end_state) | ||
112 | return 0xFE; | ||
113 | |||
114 | if ((start & mask) != (base & mask)) { | ||
115 | continue; | ||
116 | } | ||
117 | |||
118 | curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; | ||
119 | if (prev_match == 0xFF) { | ||
120 | prev_match = curr_match; | ||
121 | continue; | ||
122 | } | ||
123 | |||
124 | if (prev_match == MTRR_TYPE_UNCACHABLE || | ||
125 | curr_match == MTRR_TYPE_UNCACHABLE) { | ||
126 | return MTRR_TYPE_UNCACHABLE; | ||
127 | } | ||
128 | |||
129 | if ((prev_match == MTRR_TYPE_WRBACK && | ||
130 | curr_match == MTRR_TYPE_WRTHROUGH) || | ||
131 | (prev_match == MTRR_TYPE_WRTHROUGH && | ||
132 | curr_match == MTRR_TYPE_WRBACK)) { | ||
133 | prev_match = MTRR_TYPE_WRTHROUGH; | ||
134 | curr_match = MTRR_TYPE_WRTHROUGH; | ||
135 | } | ||
136 | |||
137 | if (prev_match != curr_match) { | ||
138 | return MTRR_TYPE_UNCACHABLE; | ||
139 | } | ||
140 | } | ||
141 | |||
142 | if (tom2) { | ||
143 | if (start >= (1ULL<<32) && (end < tom2)) | ||
144 | return MTRR_TYPE_WRBACK; | ||
145 | } | ||
146 | |||
147 | if (prev_match != 0xFF) | ||
148 | return prev_match; | ||
149 | |||
150 | return mtrr_state.def_type; | ||
151 | } | ||
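
For reference, the fixed-range lookup above maps the first megabyte onto the 88 fixed-range entries as 8 x 64KB, 16 x 16KB and 64 x 4KB slots. A standalone sketch of the same index math (helper name hypothetical, not part of the patch):

	/*
	 * Map a physical address below 1MB to its fixed-range slot:
	 * 0x00000-0x7FFFF: 64KB units -> idx  0..7
	 * 0x80000-0xBFFFF: 16KB units -> idx  8..23
	 * 0xC0000-0xFFFFF:  4KB units -> idx 24..87
	 */
	static unsigned int fixed_range_index(unsigned long start)
	{
		if (start < 0x80000)
			return start >> 16;
		if (start < 0xC0000)
			return 8 + ((start - 0x80000) >> 14);
		return 24 + ((start - 0xC0000) >> 12);
	}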
152 | |||
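The overlap handling in the loop above follows the documented MTRR precedence rules: UNCACHABLE wins outright, WRTHROUGH wins over WRBACK, and any other mismatch degrades to UNCACHABLE. A hedged sketch of how a caller might consume the result (helper name hypothetical; the real consumers arrive with the PAT patches):

	static int range_is_writeback(u64 start, u64 end)
	{
		u8 type = mtrr_type_lookup(start, end);

		if (type == 0xFF)	/* MTRR state not set up or disabled */
			return 0;
		if (type == 0xFE)	/* range only partially covered */
			return 0;
		return type == MTRR_TYPE_WRBACK;
	}
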
45 | /* Get the MSR pair relating to a var range */ | 153 | /* Get the MSR pair relating to a var range */ |
46 | static void | 154 | static void |
47 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | 155 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) |
@@ -79,12 +187,16 @@ static void print_fixed(unsigned base, unsigned step, const mtrr_type*types) | |||
79 | base, base + step - 1, mtrr_attrib_to_str(*types)); | 187 | base, base + step - 1, mtrr_attrib_to_str(*types)); |
80 | } | 188 | } |
81 | 189 | ||
190 | static void prepare_set(void); | ||
191 | static void post_set(void); | ||
192 | |||
82 | /* Grab all of the MTRR state for this CPU into *state */ | 193 | /* Grab all of the MTRR state for this CPU into *state */ |
83 | void __init get_mtrr_state(void) | 194 | void __init get_mtrr_state(void) |
84 | { | 195 | { |
85 | unsigned int i; | 196 | unsigned int i; |
86 | struct mtrr_var_range *vrs; | 197 | struct mtrr_var_range *vrs; |
87 | unsigned lo, dummy; | 198 | unsigned lo, dummy; |
199 | unsigned long flags; | ||
88 | 200 | ||
89 | vrs = mtrr_state.var_ranges; | 201 | vrs = mtrr_state.var_ranges; |
90 | 202 | ||
@@ -100,6 +212,15 @@ void __init get_mtrr_state(void) | |||
100 | mtrr_state.def_type = (lo & 0xff); | 212 | mtrr_state.def_type = (lo & 0xff); |
101 | mtrr_state.enabled = (lo & 0xc00) >> 10; | 213 | mtrr_state.enabled = (lo & 0xc00) >> 10; |
102 | 214 | ||
215 | if (amd_special_default_mtrr()) { | ||
216 | unsigned lo, hi; | ||
217 | /* TOP_MEM2 */ | ||
218 | rdmsr(MSR_K8_TOP_MEM2, lo, hi); | ||
219 | tom2 = hi; | ||
220 | tom2 <<= 32; | ||
221 | tom2 |= lo; | ||
222 | tom2 &= 0xffffff8000000ULL; | ||
223 | } | ||
103 | if (mtrr_show) { | 224 | if (mtrr_show) { |
104 | int high_width; | 225 | int high_width; |
105 | 226 | ||
@@ -130,7 +251,22 @@ void __init get_mtrr_state(void) | |||
130 | else | 251 | else |
131 | printk(KERN_INFO "MTRR %u disabled\n", i); | 252 | printk(KERN_INFO "MTRR %u disabled\n", i); |
132 | } | 253 | } |
254 | if (tom2) { | ||
255 | printk(KERN_INFO "TOM2: %016llx aka %lldM\n", | ||
256 | tom2, tom2>>20); | ||
257 | } | ||
133 | } | 258 | } |
259 | mtrr_state_set = 1; | ||
260 | |||
261 | /* PAT setup for the boot CPU. We need to go through the sync steps here */ | ||
262 | local_irq_save(flags); | ||
263 | prepare_set(); | ||
264 | |||
265 | pat_init(); | ||
266 | |||
267 | post_set(); | ||
268 | local_irq_restore(flags); | ||
269 | |||
134 | } | 270 | } |
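
The prepare_set()/post_set() bracket around pat_init() above follows the architectural memory-type reprogramming sequence (caches disabled and flushed, TLBs flushed, MTRRs off before the write). Annotated, the flow is roughly (the comments are an editorial gloss on the static helpers in this file, not verified line by line):

	local_irq_save(flags);
	prepare_set();		/* CD=1 + wbinvd, PGE off + TLB flush, MTRRs off */
	pat_init();		/* IA32_CR_PAT may now be rewritten safely */
	post_set();		/* re-enable MTRRs, restore CR0/CR4 */
	local_irq_restore(flags);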
135 | 271 | ||
136 | /* Some BIOSes are broken and don't set all MTRRs the same! */ | 272 | /* Some BIOSes are broken and don't set all MTRRs the same! */ |
@@ -397,6 +533,9 @@ static void generic_set_all(void) | |||
397 | /* Actually set the state */ | 533 | /* Actually set the state */ |
398 | mask = set_mtrr_state(); | 534 | mask = set_mtrr_state(); |
399 | 535 | ||
536 | /* also set PAT */ | ||
537 | pat_init(); | ||
538 | |||
400 | post_set(); | 539 | post_set(); |
401 | local_irq_restore(flags); | 540 | local_irq_restore(flags); |
402 | 541 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 91e150acb46c..1960f1985e5e 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
@@ -424,11 +424,10 @@ static int __init mtrr_if_init(void) | |||
424 | return -ENODEV; | 424 | return -ENODEV; |
425 | 425 | ||
426 | proc_root_mtrr = | 426 | proc_root_mtrr = |
427 | create_proc_entry("mtrr", S_IWUSR | S_IRUGO, &proc_root); | 427 | proc_create("mtrr", S_IWUSR | S_IRUGO, &proc_root, &mtrr_fops); |
428 | if (proc_root_mtrr) { | 428 | |
429 | if (proc_root_mtrr) | ||
429 | proc_root_mtrr->owner = THIS_MODULE; | 430 | proc_root_mtrr->owner = THIS_MODULE; |
430 | proc_root_mtrr->proc_fops = &mtrr_fops; | ||
431 | } | ||
432 | return 0; | 431 | return 0; |
433 | } | 432 | } |
434 | 433 | ||
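proc_create() publishes the entry with its file_operations already attached, which closes the window in which a create_proc_entry() entry was momentarily visible without ->proc_fops. A minimal sketch of the pattern (entry name and fops are placeholders):

	static int __init example_proc_init(void)
	{
		struct proc_dir_entry *pde;

		pde = proc_create("example", S_IRUGO, &proc_root, &example_fops);
		if (!pde)
			return -ENOMEM;
		pde->owner = THIS_MODULE;
		return 0;
	}
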
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index a6450b3ae759..6a1e278d9323 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -627,7 +627,7 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup); | |||
627 | #define Tom2Enabled (1U << 21) | 627 | #define Tom2Enabled (1U << 21) |
628 | #define Tom2ForceMemTypeWB (1U << 22) | 628 | #define Tom2ForceMemTypeWB (1U << 22) |
629 | 629 | ||
630 | static __init int amd_special_default_mtrr(void) | 630 | int __init amd_special_default_mtrr(void) |
631 | { | 631 | { |
632 | u32 l, h; | 632 | u32 l, h; |
633 | 633 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c index 9f8ba923d1c9..7f7e2753685b 100644 --- a/arch/x86/kernel/cpu/mtrr/state.c +++ b/arch/x86/kernel/cpu/mtrr/state.c | |||
@@ -19,13 +19,15 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | |||
19 | if (use_intel() || is_cpu(CYRIX)) { | 19 | if (use_intel() || is_cpu(CYRIX)) { |
20 | 20 | ||
21 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 21 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
22 | if ( cpu_has_pge ) { | 22 | if (cpu_has_pge) { |
23 | ctxt->cr4val = read_cr4(); | 23 | ctxt->cr4val = read_cr4(); |
24 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); | 24 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); |
25 | } | 25 | } |
26 | 26 | ||
27 | /* Disable and flush caches. Note that wbinvd flushes the TLBs as | 27 | /* |
28 | a side-effect */ | 28 | * Disable and flush caches. Note that wbinvd flushes the TLBs |
29 | * as a side-effect | ||
30 | */ | ||
29 | cr0 = read_cr0() | X86_CR0_CD; | 31 | cr0 = read_cr0() | X86_CR0_CD; |
30 | wbinvd(); | 32 | wbinvd(); |
31 | write_cr0(cr0); | 33 | write_cr0(cr0); |
@@ -42,7 +44,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | |||
42 | 44 | ||
43 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) | 45 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) |
44 | { | 46 | { |
45 | if (use_intel()) | 47 | if (use_intel()) |
46 | /* Disable MTRRs, and set the default type to uncached */ | 48 | /* Disable MTRRs, and set the default type to uncached */ |
47 | mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, | 49 | mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, |
48 | ctxt->deftype_hi); | 50 | ctxt->deftype_hi); |
@@ -66,12 +68,12 @@ void set_mtrr_done(struct set_mtrr_context *ctxt) | |||
66 | else | 68 | else |
67 | /* Cyrix ARRs - everything else was excluded at the top */ | 69 | /* Cyrix ARRs - everything else was excluded at the top */ |
68 | setCx86(CX86_CCR3, ctxt->ccr3); | 70 | setCx86(CX86_CCR3, ctxt->ccr3); |
69 | 71 | ||
70 | /* Enable caches */ | 72 | /* Enable caches */ |
71 | write_cr0(read_cr0() & 0xbfffffff); | 73 | write_cr0(read_cr0() & 0xbfffffff); |
72 | 74 | ||
73 | /* Restore value of CR4 */ | 75 | /* Restore value of CR4 */ |
74 | if ( cpu_has_pge ) | 76 | if (cpu_has_pge) |
75 | write_cr4(ctxt->cr4val); | 77 | write_cr4(ctxt->cr4val); |
76 | } | 78 | } |
77 | /* Re-enable interrupts locally (if enabled previously) */ | 79 | /* Re-enable interrupts locally (if enabled previously) */ |
diff --git a/arch/x86/kernel/cpu/nexgen.c b/arch/x86/kernel/cpu/nexgen.c index 961fbe1a748f..5d5e1c134123 100644 --- a/arch/x86/kernel/cpu/nexgen.c +++ b/arch/x86/kernel/cpu/nexgen.c | |||
@@ -9,11 +9,11 @@ | |||
9 | * Detect a NexGen CPU running without BIOS hypercode new enough | 9 | * Detect a NexGen CPU running without BIOS hypercode new enough |
10 | * to have CPUID. (Thanks to Herbert Oppmann) | 10 | * to have CPUID. (Thanks to Herbert Oppmann) |
11 | */ | 11 | */ |
12 | 12 | ||
13 | static int __cpuinit deep_magic_nexgen_probe(void) | 13 | static int __cpuinit deep_magic_nexgen_probe(void) |
14 | { | 14 | { |
15 | int ret; | 15 | int ret; |
16 | 16 | ||
17 | __asm__ __volatile__ ( | 17 | __asm__ __volatile__ ( |
18 | " movw $0x5555, %%ax\n" | 18 | " movw $0x5555, %%ax\n" |
19 | " xorw %%dx,%%dx\n" | 19 | " xorw %%dx,%%dx\n" |
@@ -22,22 +22,21 @@ static int __cpuinit deep_magic_nexgen_probe(void) | |||
22 | " movl $0, %%eax\n" | 22 | " movl $0, %%eax\n" |
23 | " jnz 1f\n" | 23 | " jnz 1f\n" |
24 | " movl $1, %%eax\n" | 24 | " movl $1, %%eax\n" |
25 | "1:\n" | 25 | "1:\n" |
26 | : "=a" (ret) : : "cx", "dx" ); | 26 | : "=a" (ret) : : "cx", "dx"); |
27 | return ret; | 27 | return ret; |
28 | } | 28 | } |
29 | 29 | ||
30 | static void __cpuinit init_nexgen(struct cpuinfo_x86 * c) | 30 | static void __cpuinit init_nexgen(struct cpuinfo_x86 *c) |
31 | { | 31 | { |
32 | c->x86_cache_size = 256; /* A few had 1 MB... */ | 32 | c->x86_cache_size = 256; /* A few had 1 MB... */ |
33 | } | 33 | } |
34 | 34 | ||
35 | static void __cpuinit nexgen_identify(struct cpuinfo_x86 * c) | 35 | static void __cpuinit nexgen_identify(struct cpuinfo_x86 *c) |
36 | { | 36 | { |
37 | /* Detect NexGen with old hypercode */ | 37 | /* Detect NexGen with old hypercode */ |
38 | if ( deep_magic_nexgen_probe() ) { | 38 | if (deep_magic_nexgen_probe()) |
39 | strcpy(c->x86_vendor_id, "NexGenDriven"); | 39 | strcpy(c->x86_vendor_id, "NexGenDriven"); |
40 | } | ||
41 | } | 40 | } |
42 | 41 | ||
43 | static struct cpu_dev nexgen_cpu_dev __cpuinitdata = { | 42 | static struct cpu_dev nexgen_cpu_dev __cpuinitdata = { |
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index af11d31dce0a..0978a4a39418 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -8,78 +8,139 @@ | |||
8 | /* | 8 | /* |
9 | * Get CPU information for use by the procfs. | 9 | * Get CPU information for use by the procfs. |
10 | */ | 10 | */ |
11 | #ifdef CONFIG_X86_32 | ||
12 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | ||
13 | unsigned int cpu) | ||
14 | { | ||
15 | #ifdef CONFIG_X86_HT | ||
16 | if (c->x86_max_cores * smp_num_siblings > 1) { | ||
17 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | ||
18 | seq_printf(m, "siblings\t: %d\n", | ||
19 | cpus_weight(per_cpu(cpu_core_map, cpu))); | ||
20 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | ||
21 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | ||
22 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | ||
23 | seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); | ||
24 | } | ||
25 | #endif | ||
26 | } | ||
27 | |||
28 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | ||
29 | { | ||
30 | /* | ||
31 | * We use exception 16 if we have hardware math and we've either seen | ||
32 | * it or the CPU claims it is internal | ||
33 | */ | ||
34 | int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu); | ||
35 | seq_printf(m, | ||
36 | "fdiv_bug\t: %s\n" | ||
37 | "hlt_bug\t\t: %s\n" | ||
38 | "f00f_bug\t: %s\n" | ||
39 | "coma_bug\t: %s\n" | ||
40 | "fpu\t\t: %s\n" | ||
41 | "fpu_exception\t: %s\n" | ||
42 | "cpuid level\t: %d\n" | ||
43 | "wp\t\t: %s\n", | ||
44 | c->fdiv_bug ? "yes" : "no", | ||
45 | c->hlt_works_ok ? "no" : "yes", | ||
46 | c->f00f_bug ? "yes" : "no", | ||
47 | c->coma_bug ? "yes" : "no", | ||
48 | c->hard_math ? "yes" : "no", | ||
49 | fpu_exception ? "yes" : "no", | ||
50 | c->cpuid_level, | ||
51 | c->wp_works_ok ? "yes" : "no"); | ||
52 | } | ||
53 | #else | ||
54 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | ||
55 | unsigned int cpu) | ||
56 | { | ||
57 | #ifdef CONFIG_SMP | ||
58 | if (c->x86_max_cores * smp_num_siblings > 1) { | ||
59 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | ||
60 | seq_printf(m, "siblings\t: %d\n", | ||
61 | cpus_weight(per_cpu(cpu_core_map, cpu))); | ||
62 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | ||
63 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | ||
64 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | ||
65 | seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); | ||
66 | } | ||
67 | #endif | ||
68 | } | ||
69 | |||
70 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | ||
71 | { | ||
72 | seq_printf(m, | ||
73 | "fpu\t\t: yes\n" | ||
74 | "fpu_exception\t: yes\n" | ||
75 | "cpuid level\t: %d\n" | ||
76 | "wp\t\t: yes\n", | ||
77 | c->cpuid_level); | ||
78 | } | ||
79 | #endif | ||
80 | |||
11 | static int show_cpuinfo(struct seq_file *m, void *v) | 81 | static int show_cpuinfo(struct seq_file *m, void *v) |
12 | { | 82 | { |
13 | struct cpuinfo_x86 *c = v; | 83 | struct cpuinfo_x86 *c = v; |
14 | int i, n = 0; | 84 | unsigned int cpu = 0; |
15 | int fpu_exception; | 85 | int i; |
16 | 86 | ||
17 | #ifdef CONFIG_SMP | 87 | #ifdef CONFIG_SMP |
18 | n = c->cpu_index; | 88 | cpu = c->cpu_index; |
19 | #endif | 89 | #endif |
20 | seq_printf(m, "processor\t: %d\n" | 90 | seq_printf(m, "processor\t: %u\n" |
21 | "vendor_id\t: %s\n" | 91 | "vendor_id\t: %s\n" |
22 | "cpu family\t: %d\n" | 92 | "cpu family\t: %d\n" |
23 | "model\t\t: %d\n" | 93 | "model\t\t: %u\n" |
24 | "model name\t: %s\n", | 94 | "model name\t: %s\n", |
25 | n, | 95 | cpu, |
26 | c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", | 96 | c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", |
27 | c->x86, | 97 | c->x86, |
28 | c->x86_model, | 98 | c->x86_model, |
29 | c->x86_model_id[0] ? c->x86_model_id : "unknown"); | 99 | c->x86_model_id[0] ? c->x86_model_id : "unknown"); |
30 | 100 | ||
31 | if (c->x86_mask || c->cpuid_level >= 0) | 101 | if (c->x86_mask || c->cpuid_level >= 0) |
32 | seq_printf(m, "stepping\t: %d\n", c->x86_mask); | 102 | seq_printf(m, "stepping\t: %d\n", c->x86_mask); |
33 | else | 103 | else |
34 | seq_printf(m, "stepping\t: unknown\n"); | 104 | seq_printf(m, "stepping\t: unknown\n"); |
35 | 105 | ||
36 | if ( cpu_has(c, X86_FEATURE_TSC) ) { | 106 | if (cpu_has(c, X86_FEATURE_TSC)) { |
37 | unsigned int freq = cpufreq_quick_get(n); | 107 | unsigned int freq = cpufreq_quick_get(cpu); |
108 | |||
38 | if (!freq) | 109 | if (!freq) |
39 | freq = cpu_khz; | 110 | freq = cpu_khz; |
40 | seq_printf(m, "cpu MHz\t\t: %u.%03u\n", | 111 | seq_printf(m, "cpu MHz\t\t: %u.%03u\n", |
41 | freq / 1000, (freq % 1000)); | 112 | freq / 1000, (freq % 1000)); |
42 | } | 113 | } |
43 | 114 | ||
44 | /* Cache size */ | 115 | /* Cache size */ |
45 | if (c->x86_cache_size >= 0) | 116 | if (c->x86_cache_size >= 0) |
46 | seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); | 117 | seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); |
47 | #ifdef CONFIG_X86_HT | 118 | |
48 | if (c->x86_max_cores * smp_num_siblings > 1) { | 119 | show_cpuinfo_core(m, c, cpu); |
49 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | 120 | show_cpuinfo_misc(m, c); |
50 | seq_printf(m, "siblings\t: %d\n", | 121 | |
51 | cpus_weight(per_cpu(cpu_core_map, n))); | 122 | seq_printf(m, "flags\t\t:"); |
52 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | 123 | for (i = 0; i < 32*NCAPINTS; i++) |
53 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | 124 | if (cpu_has(c, i) && x86_cap_flags[i] != NULL) |
54 | } | ||
55 | #endif | ||
56 | |||
57 | /* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */ | ||
58 | fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu); | ||
59 | seq_printf(m, "fdiv_bug\t: %s\n" | ||
60 | "hlt_bug\t\t: %s\n" | ||
61 | "f00f_bug\t: %s\n" | ||
62 | "coma_bug\t: %s\n" | ||
63 | "fpu\t\t: %s\n" | ||
64 | "fpu_exception\t: %s\n" | ||
65 | "cpuid level\t: %d\n" | ||
66 | "wp\t\t: %s\n" | ||
67 | "flags\t\t:", | ||
68 | c->fdiv_bug ? "yes" : "no", | ||
69 | c->hlt_works_ok ? "no" : "yes", | ||
70 | c->f00f_bug ? "yes" : "no", | ||
71 | c->coma_bug ? "yes" : "no", | ||
72 | c->hard_math ? "yes" : "no", | ||
73 | fpu_exception ? "yes" : "no", | ||
74 | c->cpuid_level, | ||
75 | c->wp_works_ok ? "yes" : "no"); | ||
76 | |||
77 | for ( i = 0 ; i < 32*NCAPINTS ; i++ ) | ||
78 | if ( test_bit(i, c->x86_capability) && | ||
79 | x86_cap_flags[i] != NULL ) | ||
80 | seq_printf(m, " %s", x86_cap_flags[i]); | 125 | seq_printf(m, " %s", x86_cap_flags[i]); |
81 | 126 | ||
82 | for (i = 0; i < 32; i++) | 127 | seq_printf(m, "\nbogomips\t: %lu.%02lu\n", |
128 | c->loops_per_jiffy/(500000/HZ), | ||
129 | (c->loops_per_jiffy/(5000/HZ)) % 100); | ||
130 | |||
131 | #ifdef CONFIG_X86_64 | ||
132 | if (c->x86_tlbsize > 0) | ||
133 | seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); | ||
134 | #endif | ||
135 | seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); | ||
136 | #ifdef CONFIG_X86_64 | ||
137 | seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); | ||
138 | seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", | ||
139 | c->x86_phys_bits, c->x86_virt_bits); | ||
140 | #endif | ||
141 | |||
142 | seq_printf(m, "power management:"); | ||
143 | for (i = 0; i < 32; i++) { | ||
83 | if (c->x86_power & (1 << i)) { | 144 | if (c->x86_power & (1 << i)) { |
84 | if (i < ARRAY_SIZE(x86_power_flags) && | 145 | if (i < ARRAY_SIZE(x86_power_flags) && |
85 | x86_power_flags[i]) | 146 | x86_power_flags[i]) |
@@ -89,11 +150,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
89 | else | 150 | else |
90 | seq_printf(m, " [%d]", i); | 151 | seq_printf(m, " [%d]", i); |
91 | } | 152 | } |
153 | } | ||
92 | 154 | ||
93 | seq_printf(m, "\nbogomips\t: %lu.%02lu\n", | 155 | seq_printf(m, "\n\n"); |
94 | c->loops_per_jiffy/(500000/HZ), | ||
95 | (c->loops_per_jiffy/(5000/HZ)) % 100); | ||
96 | seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size); | ||
97 | 156 | ||
98 | return 0; | 157 | return 0; |
99 | } | 158 | } |
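
For reference, the bogomips lines above print loops_per_jiffy * HZ / 500000 to two decimal places: with HZ=250 and loops_per_jiffy=2,000,000, the integer part is 2000000 / (500000/250) = 1000 and the fraction is (2000000 / (5000/250)) % 100 = 0, so the output reads "bogomips : 1000.00".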
@@ -106,14 +165,17 @@ static void *c_start(struct seq_file *m, loff_t *pos) | |||
106 | return &cpu_data(*pos); | 165 | return &cpu_data(*pos); |
107 | return NULL; | 166 | return NULL; |
108 | } | 167 | } |
168 | |||
109 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | 169 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) |
110 | { | 170 | { |
111 | *pos = next_cpu(*pos, cpu_online_map); | 171 | *pos = next_cpu(*pos, cpu_online_map); |
112 | return c_start(m, pos); | 172 | return c_start(m, pos); |
113 | } | 173 | } |
174 | |||
114 | static void c_stop(struct seq_file *m, void *v) | 175 | static void c_stop(struct seq_file *m, void *v) |
115 | { | 176 | { |
116 | } | 177 | } |
178 | |||
117 | const struct seq_operations cpuinfo_op = { | 179 | const struct seq_operations cpuinfo_op = { |
118 | .start = c_start, | 180 | .start = c_start, |
119 | .next = c_next, | 181 | .next = c_next, |
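
cpuinfo_op implements the standard seq_file iterator contract: start() positions the walk at *pos (returning NULL ends it), next() advances *pos, stop() releases any state, and show() emits one record. The /proc side typically binds it with seq_open(); a sketch under that assumption:

	static int cpuinfo_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &cpuinfo_op);
	}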
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index e8b422c1c512..b911a2c61b8f 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
@@ -18,8 +18,8 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
18 | /* Print CMS and CPU revision */ | 18 | /* Print CMS and CPU revision */ |
19 | max = cpuid_eax(0x80860000); | 19 | max = cpuid_eax(0x80860000); |
20 | cpu_rev = 0; | 20 | cpu_rev = 0; |
21 | if ( max >= 0x80860001 ) { | 21 | if (max >= 0x80860001) { |
22 | cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); | 22 | cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); |
23 | if (cpu_rev != 0x02000000) { | 23 | if (cpu_rev != 0x02000000) { |
24 | printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n", | 24 | printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n", |
25 | (cpu_rev >> 24) & 0xff, | 25 | (cpu_rev >> 24) & 0xff, |
@@ -29,7 +29,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
29 | cpu_freq); | 29 | cpu_freq); |
30 | } | 30 | } |
31 | } | 31 | } |
32 | if ( max >= 0x80860002 ) { | 32 | if (max >= 0x80860002) { |
33 | cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy); | 33 | cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy); |
34 | if (cpu_rev == 0x02000000) { | 34 | if (cpu_rev == 0x02000000) { |
35 | printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n", | 35 | printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n", |
@@ -42,7 +42,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
42 | cms_rev1 & 0xff, | 42 | cms_rev1 & 0xff, |
43 | cms_rev2); | 43 | cms_rev2); |
44 | } | 44 | } |
45 | if ( max >= 0x80860006 ) { | 45 | if (max >= 0x80860006) { |
46 | cpuid(0x80860003, | 46 | cpuid(0x80860003, |
47 | (void *)&cpu_info[0], | 47 | (void *)&cpu_info[0], |
48 | (void *)&cpu_info[4], | 48 | (void *)&cpu_info[4], |
@@ -74,23 +74,25 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
74 | wrmsr(0x80860004, cap_mask, uk); | 74 | wrmsr(0x80860004, cap_mask, uk); |
75 | 75 | ||
76 | /* All Transmeta CPUs have a constant TSC */ | 76 | /* All Transmeta CPUs have a constant TSC */ |
77 | set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); | 77 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
78 | 78 | ||
79 | #ifdef CONFIG_SYSCTL | 79 | #ifdef CONFIG_SYSCTL |
80 | /* randomize_va_space slows us down enormously; | 80 | /* |
81 | it probably triggers retranslation of x86->native bytecode */ | 81 | * randomize_va_space slows us down enormously; |
82 | * it probably triggers retranslation of x86->native bytecode | ||
83 | */ | ||
82 | randomize_va_space = 0; | 84 | randomize_va_space = 0; |
83 | #endif | 85 | #endif |
84 | } | 86 | } |
85 | 87 | ||
86 | static void __cpuinit transmeta_identify(struct cpuinfo_x86 * c) | 88 | static void __cpuinit transmeta_identify(struct cpuinfo_x86 *c) |
87 | { | 89 | { |
88 | u32 xlvl; | 90 | u32 xlvl; |
89 | 91 | ||
90 | /* Transmeta-defined flags: level 0x80860001 */ | 92 | /* Transmeta-defined flags: level 0x80860001 */ |
91 | xlvl = cpuid_eax(0x80860000); | 93 | xlvl = cpuid_eax(0x80860000); |
92 | if ( (xlvl & 0xffff0000) == 0x80860000 ) { | 94 | if ((xlvl & 0xffff0000) == 0x80860000) { |
93 | if ( xlvl >= 0x80860001 ) | 95 | if (xlvl >= 0x80860001) |
94 | c->x86_capability[2] = cpuid_edx(0x80860001); | 96 | c->x86_capability[2] = cpuid_edx(0x80860001); |
95 | } | 97 | } |
96 | } | 98 | } |
@@ -102,8 +104,4 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { | |||
102 | .c_identify = transmeta_identify, | 104 | .c_identify = transmeta_identify, |
103 | }; | 105 | }; |
104 | 106 | ||
105 | int __init transmeta_init_cpu(void) | 107 | cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev); |
106 | { | ||
107 | cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev; | ||
108 | return 0; | ||
109 | } | ||
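cpu_vendor_dev_register() replaces the per-vendor *_init_cpu() entry points with a table assembled at link time: each vendor drops a { vendor, cpu_dev } pair into a dedicated init section that common.c walks during setup. A sketch of the helper, approximating its definition in this series' cpu.h (section and field names are per the patch set, not verified here):

	struct cpu_vendor_dev {
		int vendor;
		struct cpu_dev *cpu_dev;
	};

	#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
		static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
		__attribute__((__section__(".x86cpuvendor.init"))) = \
		{ cpu_vendor_id, cpu_dev }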
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index a7a4e75bdcd7..b1fc90989d75 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c | |||
@@ -3,24 +3,23 @@ | |||
3 | #include <asm/processor.h> | 3 | #include <asm/processor.h> |
4 | #include "cpu.h" | 4 | #include "cpu.h" |
5 | 5 | ||
6 | /* UMC chips appear to be only either 386 or 486, so no special init takes place. | 6 | /* |
7 | * UMC chips appear to be only either 386 or 486, | ||
8 | * so no special init takes place. | ||
7 | */ | 9 | */ |
8 | 10 | ||
9 | static struct cpu_dev umc_cpu_dev __cpuinitdata = { | 11 | static struct cpu_dev umc_cpu_dev __cpuinitdata = { |
10 | .c_vendor = "UMC", | 12 | .c_vendor = "UMC", |
11 | .c_ident = { "UMC UMC UMC" }, | 13 | .c_ident = { "UMC UMC UMC" }, |
12 | .c_models = { | 14 | .c_models = { |
13 | { .vendor = X86_VENDOR_UMC, .family = 4, .model_names = | 15 | { .vendor = X86_VENDOR_UMC, .family = 4, .model_names = |
14 | { | 16 | { |
15 | [1] = "U5D", | 17 | [1] = "U5D", |
16 | [2] = "U5S", | 18 | [2] = "U5S", |
17 | } | 19 | } |
18 | }, | 20 | }, |
19 | }, | 21 | }, |
20 | }; | 22 | }; |
21 | 23 | ||
22 | int __init umc_init_cpu(void) | 24 | cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev); |
23 | { | 25 | |
24 | cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev; | ||
25 | return 0; | ||
26 | } | ||