aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2007-10-11 05:16:58 -0400
committerThomas Gleixner <tglx@linutronix.de>2007-10-11 05:16:58 -0400
commitf7627e2513987bb5d4e8cb13c4e0a478352141ac (patch)
tree46ef70a107285c1dfe8161a57f433d30252d285a /arch/x86/kernel
parent4ac24f63fd203bc12a841a88a2034dccd358d0d1 (diff)
i386: move kernel/cpu
Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--arch/x86/kernel/cpu/Makefile20
-rw-r--r--arch/x86/kernel/cpu/addon_cpuid_features.c50
-rw-r--r--arch/x86/kernel/cpu/amd.c337
-rw-r--r--arch/x86/kernel/cpu/bugs.c192
-rw-r--r--arch/x86/kernel/cpu/centaur.c471
-rw-r--r--arch/x86/kernel/cpu/common.c733
-rw-r--r--arch/x86/kernel/cpu/cpu.h28
-rw-r--r--arch/x86/kernel/cpu/cyrix.c463
-rw-r--r--arch/x86/kernel/cpu/intel.c333
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c806
-rw-r--r--arch/x86/kernel/cpu/nexgen.c60
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c713
-rw-r--r--arch/x86/kernel/cpu/proc.c192
-rw-r--r--arch/x86/kernel/cpu/transmeta.c116
-rw-r--r--arch/x86/kernel/cpu/umc.c26
15 files changed, 4540 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
new file mode 100644
index 000000000000..6687f6d5ad2f
--- /dev/null
+++ b/arch/x86/kernel/cpu/Makefile
@@ -0,0 +1,20 @@
#
# Makefile for x86-compatible CPU details and quirks
#

# Core CPU identification/setup, /proc/cpuinfo output and boot-time
# bug checks — always built.
obj-y	:=	common.o proc.o bugs.o

# Per-vendor identification and errata handling.
obj-y	+=	amd.o
obj-y	+=	cyrix.o
obj-y	+=	centaur.o
obj-y	+=	transmeta.o
obj-y	+=	intel.o intel_cacheinfo.o addon_cpuid_features.o
obj-y	+=	nexgen.o
obj-y	+=	umc.o

obj-$(CONFIG_X86_MCE)	+=	../../../x86/kernel/cpu/mcheck/

obj-$(CONFIG_MTRR)	+=	../../../x86/kernel/cpu/mtrr/
obj-$(CONFIG_CPU_FREQ)	+=	../../../x86/kernel/cpu/cpufreq/

# NMI watchdog via performance counters needs the local APIC.
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
new file mode 100644
index 000000000000..3e91d3ee26ec
--- /dev/null
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -0,0 +1,50 @@
1
2/*
3 * Routines to identify additional cpu features that are scattered in
4 * cpuid space.
5 */
6
7#include <linux/cpu.h>
8
9#include <asm/processor.h>
10
/*
 * One scattered CPUID feature bit: which capability flag to set, and
 * where to find it (CPUID leaf, output register, bit position).
 */
struct cpuid_bit {
	u16 feature;	/* X86_FEATURE_* capability index to set */
	u8 reg;		/* CR_* index of the CPUID output register */
	u8 bit;		/* bit position within that register */
	u32 level;	/* CPUID leaf to query */
};
17
/*
 * Indices into the regs[] array filled by the cpuid() call in
 * init_scattered_cpuid_features().  NOTE: the storage order is
 * EAX, ECX, EDX, EBX — deliberately matching how the results are
 * assigned there (EBX is stored last), not the usual A/B/C/D order.
 */
enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};
24
/*
 * Detect feature bits that live in non-standard CPUID leaves and fold
 * them into c->x86_capability.  Table-driven: to support a new
 * scattered bit, add an entry to cpuid_bits[] below.
 */
void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{
	u32 max_level;
	u32 regs[4];
	const struct cpuid_bit *cb;

	static const struct cpuid_bit cpuid_bits[] = {
		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
		{ 0, 0, 0, 0 }		/* table terminator */
	};

	for (cb = cpuid_bits; cb->feature; cb++) {

		/*
		 * Verify that the level is valid: the leaf must not exceed
		 * the maximum reported for its range (standard/extended/...).
		 */
		max_level = cpuid_eax(cb->level & 0xffff0000);
		if (max_level < cb->level ||
		    max_level > (cb->level | 0xffff))
			continue;

		/* Fill regs[] in the CR_* order defined above. */
		cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
			&regs[CR_ECX], &regs[CR_EDX]);

		if (regs[cb->reg] & (1 << cb->bit))
			set_bit(cb->feature, c->x86_capability);
	}
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
new file mode 100644
index 000000000000..dcf6bbb1c7c0
--- /dev/null
+++ b/arch/x86/kernel/cpu/amd.c
@@ -0,0 +1,337 @@
1#include <linux/init.h>
2#include <linux/bitops.h>
3#include <linux/mm.h>
4#include <asm/io.h>
5#include <asm/processor.h>
6#include <asm/apic.h>
7
8#include "cpu.h"
9
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 *	http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

/*
 * A trivial empty function used as the indirect-call target when
 * timing the K6 erratum in init_amd() below; defined in asm so the
 * call overhead is a single RET.
 */
extern void vide(void);
__asm__(".align 4\nvide: ret");
25
#ifdef CONFIG_X86_LOCAL_APIC
#define ENABLE_C1E_MASK		0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM		0x0ff00000	/* extended family mask */
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000	/* extended model mask */
#define CPUID_XMOD_REV_F	0x00040000

/*
 * AMD systems with C1E don't have a working lAPIC timer. Check for that.
 * Returns 1 when the timer must be considered broken (C1E enabled, or
 * an unrecognised family where we err on the side of caution).
 */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi;
	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		/* Pre-rev-F K8 has no C1E; rev F and later fall through
		 * to the MSR check below. */
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
		/* fall through */
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}
#endif
58
/* When non-zero, keeps X86_FEATURE_MWAIT set on family 0x10 (see
 * init_amd() below).  NOTE(review): the code that sets this flag is
 * not visible in this file — presumably a boot parameter handler. */
int force_mwait __cpuinitdata;
60
/*
 * Family/model-specific setup for AMD CPUs: applies published errata
 * workarounds, fixes up feature bits and fills in cache/core topology.
 * Installed as amd_cpu_dev.c_init and run for each AMD CPU brought up.
 */
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);	/* RAM size in MB */
	int r;

#ifdef CONFIG_SMP
	unsigned long long value;

	/* Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, c->x86_capability);

	r = get_model_name(c);	/* NOTE(review): r is never read afterwards */

	switch(c->x86)
	{
		case 4:
		/*
		 * General Systems BIOSen alias the cpu frequency registers
		 * of the Elan at 0x000df000. Unfortunately, one of the Linux
		 * drivers subsequently pokes it, and changes the CPU speed.
		 * Workaround : Remove the unneeded alias.
		 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
			if (c->x86_model==9 || c->x86_model == 10) {
				if (inl (CBAR) & CBAR_ENB)
					outl (0 | CBAR_KEY, CBAR);
			}
			break;
		case 5:
			if( c->x86_model < 6 )
			{
				/* Based on AMD doc 20734R - June 2000 */
				if ( c->x86_model == 0 ) {
					clear_bit(X86_FEATURE_APIC, c->x86_capability);
					set_bit(X86_FEATURE_PGE, c->x86_capability);
				}
				break;
			}

			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
				const int K6_BUG_LOOP = 1000000;
				int n;
				void (*f_vide)(void);
				unsigned long d, d2;

				printk(KERN_INFO "AMD K6 stepping B detected - ");

				/*
				 * It looks like AMD fixed the 2.6.2 bug and improved indirect
				 * calls at the same time.
				 */

				/* Time K6_BUG_LOOP indirect calls with the TSC:
				 * fixed parts run them markedly faster. */
				n = K6_BUG_LOOP;
				f_vide = vide;
				rdtscl(d);
				while (n--)
					f_vide();
				rdtscl(d2);
				d = d2-d;

				if (d > 20*K6_BUG_LOOP)
					printk("system stability may be impaired when more than 32 MB are used.\n");
				else
					printk("probably OK (after B9730xxxx).\n");
				printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
			}

			/* K6 with old style WHCR */
			if (c->x86_model < 8 ||
			   (c->x86_model== 8 && c->x86_mask < 8)) {
				/* We can only write allocate on the low 508Mb */
				if(mbytes>508)
					mbytes=508;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l&0x0000FFFF)==0) {
					unsigned long flags;
					l=(1<<0)|((mbytes/4)<<1);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
						mbytes);
				}
				break;
			}

			if ((c->x86_model == 8 && c->x86_mask >7) ||
			     c->x86_model == 9 || c->x86_model == 13) {
				/* The more serious chips .. */

				if(mbytes>4092)
					mbytes=4092;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l&0xFFFF0000)==0) {
					unsigned long flags;
					l=((mbytes>>2)<<22)|(1<<16);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
						mbytes);
				}

				/* Set MTRR capability flag if appropriate */
				if (c->x86_model == 13 || c->x86_model == 9 ||
				   (c->x86_model == 8 && c->x86_mask >= 8))
					set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
				break;
			}

			if (c->x86_model == 10) {
				/* AMD Geode LX is model 10 */
				/* placeholder for any needed mods */
				break;
			}
			break;
		case 6: /* An Athlon/Duron */

			/* Bit 15 of Athlon specific MSR 15, needs to be 0
			 * to enable SSE on Palomino/Morgan/Barton CPU's.
			 * If the BIOS didn't enable it already, enable it here.
			 */
			if (c->x86_model >= 6 && c->x86_model <= 10) {
				if (!cpu_has(c, X86_FEATURE_XMM)) {
					printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
					rdmsr(MSR_K7_HWCR, l, h);
					l &= ~0x00008000;
					wrmsr(MSR_K7_HWCR, l, h);
					set_bit(X86_FEATURE_XMM, c->x86_capability);
				}
			}

			/* It's been determined by AMD that Athlons since model 8 stepping 1
			 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
			 * As per AMD technical note 27212 0.2
			 */
			if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
				rdmsr(MSR_K7_CLK_CTL, l, h);
				if ((l & 0xfff00000) != 0x20000000) {
					printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
						((l & 0x000fffff)|0x20000000));
					wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
				}
			}
			break;
	}

	switch (c->x86) {
	case 15:
	/* Use K8 tuning for Fam10h and Fam11h */
	case 0x10:
	case 0x11:
		set_bit(X86_FEATURE_K8, c->x86_capability);
		break;
	case 6:
		set_bit(X86_FEATURE_K7, c->x86_capability);
		break;
	}
	if (c->x86 >= 6)
		set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);

	display_cacheinfo(c);

	/* Number of cores: extended leaf 0x80000008 ECX[7:0] + 1. */
	if (cpuid_eax(0x80000000) >= 0x80000008) {
		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
	}

	/* Power management feature flags; bit 8 = invariant TSC. */
	if (cpuid_eax(0x80000000) >= 0x80000007) {
		c->x86_power = cpuid_edx(0x80000007);
		if (c->x86_power & (1<<8))
			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
	}

#ifdef CONFIG_X86_HT
	/*
	 * On a AMD multi core setup the lower bits of the APIC id
	 * distinguish the cores.
	 */
	if (c->x86_max_cores > 1) {
		int cpu = smp_processor_id();
		unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;

		if (bits == 0) {
			/* ApicIdCoreIdSize not reported: derive the
			 * number of id bits from the core count. */
			while ((1 << bits) < c->x86_max_cores)
				bits++;
		}
		c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
		c->phys_proc_id >>= bits;
		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
		       cpu, c->x86_max_cores, c->cpu_core_id);
	}
#endif

	if (cpuid_eax(0x80000000) >= 0x80000006) {
		if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

#ifdef CONFIG_X86_LOCAL_APIC
	/* C1E parts have no working local APIC timer (see above). */
	if (amd_apic_timer_broken())
		local_apic_timer_disabled = 1;
#endif

	if (c->x86 == 0x10 && !force_mwait)
		clear_bit(X86_FEATURE_MWAIT, c->x86_capability);

	/* K6s reports MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_bit(X86_FEATURE_MCE, c->x86_capability);
}
300
301static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
302{
303 /* AMD errata T13 (order #21922) */
304 if ((c->x86 == 6)) {
305 if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */
306 size = 64;
307 if (c->x86_model == 4 &&
308 (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */
309 size = 256;
310 }
311 return size;
312}
313
/*
 * Vendor driver hookup for common.c: identification strings,
 * family-4 model names, and the init/size-cache callbacks.
 */
static struct cpu_dev amd_cpu_dev __cpuinitdata = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_init		= init_amd,
	.c_size_cache	= amd_size_cache,
};
332
/* Register the AMD vendor driver in the global cpu_devs[] table. */
int __init amd_init_cpu(void)
{
	cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
	return 0;
}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
new file mode 100644
index 000000000000..59266f03d1cd
--- /dev/null
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -0,0 +1,192 @@
1/*
2 * arch/i386/cpu/bugs.c
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 *
6 * Cyrix stuff, June 1998 by:
7 * - Rafael R. Reilova (moved everything from head.S),
8 * <rreilova@ececs.uc.edu>
9 * - Channing Corn (tests & fixes),
10 * - Andrew D. Balsa (code cleanup).
11 */
12#include <linux/init.h>
13#include <linux/utsname.h>
14#include <asm/bugs.h>
15#include <asm/processor.h>
16#include <asm/i387.h>
17#include <asm/msr.h>
18#include <asm/paravirt.h>
19#include <asm/alternative.h>
20
/* "no-hlt" boot option: mark HLT as non-working so check_hlt() skips
 * executing it. */
static int __init no_halt(char *s)
{
	boot_cpu_data.hlt_works_ok = 0;
	return 1;
}

__setup("no-hlt", no_halt);
28
/* "mca-pentium" boot option: raises mca_pentium_flag.
 * NOTE(review): the consumer of this flag is not visible in this file. */
static int __init mca_pentium(char *s)
{
	mca_pentium_flag = 1;
	return 1;
}

__setup("mca-pentium", mca_pentium);
36
/*
 * "no387" boot option: disable the FPU.  Clears hard_math and sets
 * CR0 bits MP|EM|TS (0xE) so FPU instructions trap to the emulator.
 */
static int __init no_387(char *s)
{
	boot_cpu_data.hard_math = 0;
	write_cr0(0xE | read_cr0());
	return 1;
}

__setup("no387", no_387);
45
/* Operands for the FDIV-bug test in check_fpu() below: a buggy FPU
 * computes x - (x/y)*y != 0 for this pair. */
static double __initdata x = 4195835.0;
static double __initdata y = 3145727.0;

/*
 * This used to check for exceptions..
 * However, it turns out that to support that,
 * the XMM trap handlers basically had to
 * be buggy. So let's have a correct XMM trap
 * handler, and forget about printing out
 * some status at boot.
 *
 * We should really only care about bugs here
 * anyway. Not features.
 */
static void __init check_fpu(void)
{
	if (!boot_cpu_data.hard_math) {
#ifndef CONFIG_MATH_EMULATION
		printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
		printk(KERN_EMERG "Giving up.\n");
		for (;;) ;
#endif
		return;
	}

/* trap_init() enabled FXSR and company _before_ testing for FP problems here. */
	/* Test for the divl bug: compute x - (x/y)*y and store the
	 * (rounded) result into fdiv_bug — zero on a correct FPU. */
	__asm__("fninit\n\t"
		"fldl %1\n\t"
		"fdivl %2\n\t"
		"fmull %2\n\t"
		"fldl %1\n\t"
		"fsubp %%st,%%st(1)\n\t"
		"fistpl %0\n\t"
		"fwait\n\t"
		"fninit"
		: "=m" (*&boot_cpu_data.fdiv_bug)
		: "m" (*&x), "m" (*&y));
	if (boot_cpu_data.fdiv_bug)
		printk("Hmm, FPU with FDIV bug.\n");
}
87
/*
 * Execute HLT a few times to verify it does not hang the machine.
 * Skipped under a hypervisor (paravirt) or when "no-hlt" was given.
 */
static void __init check_hlt(void)
{
	if (paravirt_enabled())
		return;

	printk(KERN_INFO "Checking 'hlt' instruction... ");
	if (!boot_cpu_data.hlt_works_ok) {
		printk("disabled\n");
		return;
	}
	halt();
	halt();
	halt();
	halt();
	printk("OK.\n");
}
104
/*
 * Most 386 processors have a bug where a POPAD can lock the
 * machine even from user space.
 */

static void __init check_popad(void)
{
#ifndef CONFIG_X86_POPAD_OK
	int res, inp = (int) &res;

	printk(KERN_INFO "Checking for popad bug... ");
	/* Put a marker in EAX, zero EDI, run PUSHA/POPA, then load
	 * through (EDX,EDI).  On a buggy CPU this sequence can wedge;
	 * otherwise EAX must still hold the marker afterwards. */
	__asm__ __volatile__(
	  "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
	  : "=&a" (res)
	  : "d" (inp)
	  : "ecx", "edi" );
	/* If this fails, it means that any user program may lock the CPU hard. Too bad. */
	if (res != 12345678) printk( "Buggy.\n" );
	else printk( "OK.\n" );
#endif
}
126
/*
 * Check whether we are able to run this kernel safely on SMP.
 *
 * - In order to run on a i386, we need to be compiled for i386
 *   (for due to lack of "invlpg" and working WP on a i386)
 * - In order to run on anything without a TSC, we need to be
 *   compiled for a i486.
 * - In order to support the local APIC on a buggy Pentium machine,
 *   we need to be compiled with CONFIG_X86_GOOD_APIC disabled,
 *   which happens implicitly if compiled for a Pentium or lower
 *   (unless an advanced selection of CPU features is used) as an
 *   otherwise config implies a properly working local APIC without
 *   the need to do extra reads from the APIC.
 */

static void __init check_config(void)
{
/*
 * We'd better not be a i386 if we're configured to use some
 * i486+ only features! (WP works in supervisor mode and the
 * new "invlpg" and "bswap" instructions)
 */
#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP)
	if (boot_cpu_data.x86 == 3)
		panic("Kernel requires i486+ for 'invlpg' and other features");
#endif

/*
 * If we configured ourselves for a TSC, we'd better have one!
 */
#ifdef CONFIG_X86_TSC
	if (!cpu_has_tsc && !tsc_disable)
		panic("Kernel compiled for Pentium+, requires TSC feature!");
#endif

/*
 * If we were told we had a good local APIC, check for buggy Pentia,
 * i.e. all B steppings and the C2 stepping of P54C when using their
 * integrated APIC (see 11AP erratum in "Pentium Processor
 * Specification Update").
 */
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
	    && cpu_has_apic
	    && boot_cpu_data.x86 == 5
	    && boot_cpu_data.x86_model == 2
	    && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
		panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
#endif
}
177
178
/*
 * Boot-time entry point: identify the boot CPU, then run all the
 * bug/config checks above and patch in alternative instructions.
 */
void __init check_bugs(void)
{
	identify_boot_cpu();
#ifndef CONFIG_SMP
	printk("CPU: ");
	print_cpu_info(&boot_cpu_data);
#endif
	check_config();
	check_fpu();
	check_hlt();
	check_popad();
	/* Record the effective CPU family digit in utsname ("i386".."i686"). */
	init_utsname()->machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();
}
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
new file mode 100644
index 000000000000..473eac883c7b
--- /dev/null
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -0,0 +1,471 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/bitops.h>
4#include <asm/processor.h>
5#include <asm/msr.h>
6#include <asm/e820.h>
7#include <asm/mtrr.h>
8#include "cpu.h"
9
10#ifdef CONFIG_X86_OOSTORE
11
12static u32 __cpuinit power2(u32 x)
13{
14 u32 s=1;
15 while(s<=x)
16 s<<=1;
17 return s>>=1;
18}
19
20
/*
 * Set up an actual MCR: program MCR register 'reg' to cover 'size'
 * bytes at 'base' (size must be a power of two) with attribute 'key',
 * then report it to the MTRR layer.
 */

static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
{
	u32 lo, hi;

	hi = base & ~0xFFF;		/* 4K-aligned physical base */
	lo = ~(size-1);		/* Size is a power of 2 so this makes a mask */
	lo &= ~0xFFF;		/* Remove the ctrl value bits */
	lo |= key;		/* Attribute we wish to set */
	wrmsr(reg+MSR_IDT_MCR0, lo, hi);
	mtrr_centaur_report_mcr(reg, lo, hi);	/* Tell the mtrr driver */
}
36
/*
 * Figure what we can cover with MCR's
 *
 * Shortcut: We know you can't put 4Gig of RAM on a winchip
 *
 * Walks the e820 map and returns the highest RAM address we should
 * cover, clipped at the lowest reserved region above 1MB.
 */

static u32 __cpuinit ramtop(void)		/* 16388 */
{
	int i;
	u32 top = 0;
	u32 clip = 0xFFFFFFFFUL;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long start, end;

		/* Regions entirely above 4GB cannot matter here. */
		if (e820.map[i].addr > 0xFFFFFFFFUL)
			continue;
		/*
		 * Don't MCR over reserved space. Ignore the ISA hole
		 * we frob around that catastrophe already
		 */

		if (e820.map[i].type == E820_RESERVED)
		{
			if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
				clip = e820.map[i].addr;
			continue;
		}
		start = e820.map[i].addr;
		end = e820.map[i].addr + e820.map[i].size;
		if (start >= end)
			continue;
		if (end > top)
			top = end;
	}
	/* Everything below 'top' should be RAM except for the ISA hole.
	   Because of the limited MCR's we want to map NV/ACPI into our
	   MCR range for gunk in RAM

	   Clip might cause us to MCR insufficient RAM but that is an
	   acceptable failure mode and should only bite obscure boxes with
	   a VESA hole at 15Mb

	   The second case Clip sometimes kicks in is when the EBDA is marked
	   as reserved. Again we fail safe with reasonable results
	 */

	if(top>clip)
		top=clip;

	return top;
}
89
/*
 * Compute a set of MCR's to give maximum coverage
 *
 * Greedily covers RAM with up to 'nr' power-of-two regions of
 * attribute 'key', growing upward from 'root' and downward toward
 * the ISA hole.  Returns the number of MCRs programmed; the caller
 * must set the enable mask itself.
 */

static int __cpuinit centaur_mcr_compute(int nr, int key)
{
	u32 mem = ramtop();
	u32 root = power2(mem);		/* largest power-of-two <= RAM top */
	u32 base = root;
	u32 top = root;
	u32 floor = 0;
	int ct = 0;

	while(ct<nr)
	{
		u32 fspace = 0;

		/*
		 * Find the largest block we will fill going upwards
		 */

		u32 high = power2(mem-top);

		/*
		 * Find the largest block we will fill going downwards
		 */

		u32 low = base/2;

		/*
		 * Don't fill below 1Mb going downwards as there
		 * is an ISA hole in the way.
		 */

		if(base <= 1024*1024)
			low = 0;

		/*
		 * See how much space we could cover by filling below
		 * the ISA hole
		 */

		if(floor == 0)
			fspace = 512*1024;
		else if(floor ==512*1024)
			fspace = 128*1024;

		/* And forget ROM space */

		/*
		 * Now install the largest coverage we get
		 */

		if(fspace > high && fspace > low)
		{
			centaur_mcr_insert(ct, floor, fspace, key);
			floor += fspace;
		}
		else if(high > low)
		{
			centaur_mcr_insert(ct, top, high, key);
			top += high;
		}
		else if(low > 0)
		{
			base -= low;
			centaur_mcr_insert(ct, base, low, key);
		}
		else break;
		ct++;
	}
	/*
	 * We loaded ct values. We now need to set the mask. The caller
	 * must do this bit.
	 */

	return ct;
}
168
/* Program the Winchip C6 MCRs for write-combining RAM coverage. */
static void __cpuinit centaur_create_optimal_mcr(void)
{
	int i;
	/*
	 * Allocate up to 6 mcrs to mark as much of ram as possible
	 * as write combining and weak write ordered.
	 *
	 * To experiment with: Linux never uses stack operations for
	 * mmio spaces so we could globally enable stack operation wc
	 *
	 * Load the registers with type 31 - full write combining, all
	 * writes weakly ordered.
	 */
	int used = centaur_mcr_compute(6, 31);

	/*
	 * Wipe unused MCRs (the chip has 8 in total)
	 */

	for(i=used;i<8;i++)
		wrmsr(MSR_IDT_MCR0+i, 0, 0);
}
191
/* Program the Winchip 2/3 MCRs and mark them active in MCR_CTRL. */
static void __cpuinit winchip2_create_optimal_mcr(void)
{
	u32 lo, hi;
	int i;

	/*
	 * Allocate up to 6 mcrs to mark as much of ram as possible
	 * as write combining, weak store ordered.
	 *
	 * Load the registers with type 25
	 *	8	-	weak write ordering
	 *	16	-	weak read ordering
	 *	1	-	write combining
	 */

	int used = centaur_mcr_compute(6, 25);

	/*
	 * Mark the registers we are using (MCR_CTRL bits 9..9+used-1).
	 */

	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
	for(i=0;i<used;i++)
		lo|=1<<(9+i);
	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);

	/*
	 * Wipe unused MCRs
	 */

	for(i=used;i<8;i++)
		wrmsr(MSR_IDT_MCR0+i, 0, 0);
}
225
/*
 * Handle the MCR key on the Winchip 2: the unlock key is read from
 * MCR_CTRL bits 17-19 and must be written back into bits 6-8 before
 * the MCRs may be modified.
 */

static void __cpuinit winchip2_unprotect_mcr(void)
{
	u32 lo, hi;
	u32 key;

	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
	lo&=~0x1C0;	/* blank bits 8-6 */
	key = (lo>>17) & 7;
	lo |= key<<6;	/* replace with unlock key */
	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}
241
/* Re-lock the MCRs by clearing the key field (bits 6-8) again. */
static void __cpuinit winchip2_protect_mcr(void)
{
	u32 lo, hi;

	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
	lo&=~0x1C0;	/* blank bits 8-6 */
	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}
250#endif /* CONFIG_X86_OOSTORE */
251
/* Bits of the Centaur extended-feature leaf (CPUID 0xC0000001 EDX)
 * and the corresponding enable bits in the VIA FCR/RNG MSRs, used by
 * init_c3() below. */
#define ACE_PRESENT	(1 << 6)
#define ACE_ENABLED	(1 << 7)
#define ACE_FCR		(1 << 28)	/* MSR_VIA_FCR */

#define RNG_PRESENT	(1 << 2)
#define RNG_ENABLED	(1 << 3)
#define RNG_ENABLE	(1 << 6)	/* MSR_VIA_RNG */
259
/*
 * VIA C3 (family 6) setup: enable the ACE crypto and RNG units if
 * present but left disabled by the BIOS, and fix up feature bits.
 */
static void __cpuinit init_c3(struct cpuinfo_x86 *c)
{
	u32 lo, hi;

	/* Test for Centaur Extended Feature Flags presence */
	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
		u32 tmp = cpuid_edx(0xC0000001);

		/* enable ACE unit, if present and disabled */
		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
			rdmsr (MSR_VIA_FCR, lo, hi);
			lo |= ACE_FCR;		/* enable ACE unit */
			wrmsr (MSR_VIA_FCR, lo, hi);
			printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
		}

		/* enable RNG unit, if present and disabled */
		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
			rdmsr (MSR_VIA_RNG, lo, hi);
			lo |= RNG_ENABLE;	/* enable RNG unit */
			wrmsr (MSR_VIA_RNG, lo, hi);
			printk(KERN_INFO "CPU: Enabled h/w RNG\n");
		}

		/* store Centaur Extended Feature Flags as
		 * word 5 of the CPU capability bit array
		 * (re-read so ACE/RNG now show as enabled) */
		c->x86_capability[5] = cpuid_edx(0xC0000001);
	}

	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
	if (c->x86_model >=6 && c->x86_model <= 9) {
		rdmsr (MSR_VIA_FCR, lo, hi);
		lo |= (1<<1 | 1<<7);	/* FCR bits enabling CX8 and PGE */
		wrmsr (MSR_VIA_FCR, lo, hi);
		set_bit(X86_FEATURE_CX8, c->x86_capability);
	}

	/* Before Nehemiah, the C3's had 3dNOW! */
	if (c->x86_model >=6 && c->x86_model <9)
		set_bit(X86_FEATURE_3DNOW, c->x86_capability);

	get_model_name(c);
	display_cacheinfo(c);
}
305
/*
 * Centaur/VIA setup.  Family 5 (WinChip C6/2/3) gets FCR feature-bit
 * fixups and optional MCR (write-combining) programming; family 6
 * (VIA C3) is handled by init_c3().
 */
static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
{
	/* Bits of MSR_IDT_FCR1: E* enable a feature, D* disable one. */
	enum {
		ECX8=1<<1,
		EIERRINT=1<<2,
		DPM=1<<3,
		DMCE=1<<4,
		DSTPCLK=1<<5,
		ELINEAR=1<<6,
		DSMC=1<<7,
		DTLOCK=1<<8,
		EDCTLB=1<<8,	/* NOTE(review): same bit as DTLOCK — verify intent */
		EMMX=1<<9,
		DPDC=1<<11,
		EBRPRED=1<<12,
		DIC=1<<13,
		DDC=1<<14,
		DNA=1<<15,
		ERETSTK=1<<16,
		E2MMX=1<<19,
		EAMD3D=1<<20,
	};

	char *name;
	u32 fcr_set=0;
	u32 fcr_clr=0;
	u32 lo,hi,newlo;
	u32 aa,bb,cc,dd;

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, c->x86_capability);

	switch (c->x86) {

		case 5:
			switch(c->x86_model) {
			case 4:
				name="C6";
				fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
				fcr_clr=DPDC;
				printk(KERN_NOTICE "Disabling bugged TSC.\n");
				clear_bit(X86_FEATURE_TSC, c->x86_capability);
#ifdef CONFIG_X86_OOSTORE
				centaur_create_optimal_mcr();
				/* Enable
					write combining on non-stack, non-string
					write combining on string, all types
					weak write ordering

				   The C6 original lacks weak read order

				   Note 0x120 is write only on Winchip 1 */

				wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
#endif
				break;
			case 8:
				/* Winchip 2 stepping names by revision. */
				switch(c->x86_mask) {
				default:
					name="2";
					break;
				case 7 ... 9:
					name="2A";
					break;
				case 10 ... 15:
					name="2B";
					break;
				}
				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
				fcr_clr=DPDC;
#ifdef CONFIG_X86_OOSTORE
				winchip2_unprotect_mcr();
				winchip2_create_optimal_mcr();
				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
				/* Enable
					write combining on non-stack, non-string
					write combining on string, all types
					weak write ordering
				*/
				lo|=31;
				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
				winchip2_protect_mcr();
#endif
				break;
			case 9:
				name="3";
				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
				fcr_clr=DPDC;
#ifdef CONFIG_X86_OOSTORE
				winchip2_unprotect_mcr();
				winchip2_create_optimal_mcr();
				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
				/* Enable
					write combining on non-stack, non-string
					write combining on string, all types
					weak write ordering
				*/
				lo|=31;
				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
				winchip2_protect_mcr();
#endif
				break;
			default:
				name="??";
			}

			/* Apply the per-model FCR set/clear masks. */
			rdmsr(MSR_IDT_FCR1, lo, hi);
			newlo=(lo|fcr_set) & (~fcr_clr);

			if (newlo!=lo) {
				printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
				wrmsr(MSR_IDT_FCR1, newlo, hi );
			} else {
				printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
			}
			/* Emulate MTRRs using Centaur's MCR. */
			set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
			/* Report CX8 */
			set_bit(X86_FEATURE_CX8, c->x86_capability);
			/* Set 3DNow! on Winchip 2 and above. */
			if (c->x86_model >=8)
				set_bit(X86_FEATURE_3DNOW, c->x86_capability);
			/* See if we can find out some more. */
			if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
				/* Yes, we can. */
				cpuid(0x80000005,&aa,&bb,&cc,&dd);
				/* Add L1 data and code cache sizes. */
				c->x86_cache_size = (cc>>24)+(dd>>24);
			}
			sprintf( c->x86_model_id, "WinChip %s", name );
			break;

		case 6:
			init_c3(c);
			break;
	}
}
444
445static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
446{
447 /* VIA C3 CPUs (670-68F) need further shifting. */
448 if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
449 size >>= 8;
450
451 /* VIA also screwed up Nehemiah stepping 1, and made
452 it return '65KB' instead of '64KB'
453 - Note, it seems this may only be in engineering samples. */
454 if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
455 size -=1;
456
457 return size;
458}
459
/* Vendor driver hookup for common.c. */
static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
	.c_vendor	= "Centaur",
	.c_ident	= { "CentaurHauls" },
	.c_init		= init_centaur,
	.c_size_cache	= centaur_size_cache,
};
466
/* Register the Centaur vendor driver in the global cpu_devs[] table. */
int __init centaur_init_cpu(void)
{
	cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
	return 0;
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
new file mode 100644
index 000000000000..d506201d397c
--- /dev/null
+++ b/arch/x86/kernel/cpu/common.c
@@ -0,0 +1,733 @@
1#include <linux/init.h>
2#include <linux/string.h>
3#include <linux/delay.h>
4#include <linux/smp.h>
5#include <linux/module.h>
6#include <linux/percpu.h>
7#include <linux/bootmem.h>
8#include <asm/semaphore.h>
9#include <asm/processor.h>
10#include <asm/i387.h>
11#include <asm/msr.h>
12#include <asm/io.h>
13#include <asm/mmu_context.h>
14#include <asm/mtrr.h>
15#include <asm/mce.h>
16#ifdef CONFIG_X86_LOCAL_APIC
17#include <asm/mpspec.h>
18#include <asm/apic.h>
19#include <mach_apic.h>
20#endif
21
22#include "cpu.h"
23
/*
 * Per-CPU GDT.  Each entry is { limit/base low word, access/flags word }
 * in the packed two-u32 descriptor encoding used by this kernel.
 */
24DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
25	[GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
26	[GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
27	[GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
28	[GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
29	/*
30	 * Segments used for calling PnP BIOS have byte granularity.
31	 * The code and data segments have fixed 64k limits;
32	 * the transfer segment sizes are set at run time.
33	 */
34	[GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
35	[GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },/* 16-bit code */
36	[GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */
37	[GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },/* 16-bit data */
38	[GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },/* 16-bit data */
39	/*
40	 * The APM segments have byte granularity and their bases
41	 * are set at run time. All have 64k limits.
42	 */
43	[GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
44	/* 16-bit code */
45	[GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
46	[GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */
47
48	[GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
49	[GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
50} };
51EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
52
/* "cachesize=" override, in KB; -1 means use the probed value. */
53static int cachesize_override __cpuinitdata = -1;
/* Set by the "nofxsr" boot parameter. */
54static int disable_x86_fxsr __cpuinitdata;
/* Default on: PSN is squashed unless "serialnumber" is given. */
55static int disable_x86_serial_nr __cpuinitdata = 1;
/* Set by the "nosep" boot parameter. */
56static int disable_x86_sep __cpuinitdata;
57
/* Registered per-vendor drivers, indexed by X86_VENDOR_*. */
58struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
59
60extern int disable_pse;
61
62static void __cpuinit default_init(struct cpuinfo_x86 * c)
63{
64 /* Not much we can do here... */
65 /* Check if at least it has cpuid */
66 if (c->cpuid_level == -1) {
67 /* No cpuid. It must be an ancient CPU */
68 if (c->x86 == 4)
69 strcpy(c->x86_model_id, "486");
70 else if (c->x86 == 3)
71 strcpy(c->x86_model_id, "386");
72 }
73}
74
/* Driver of last resort when the CPUID vendor string is unknown. */
75static struct cpu_dev __cpuinitdata default_cpu = {
76	.c_init	= default_init,
77	.c_vendor = "Unknown",
78};
/* Active vendor driver for the CPU being set up; see get_cpu_vendor(). */
79static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
80
81static int __init cachesize_setup(char *str)
82{
83 get_option (&str, &cachesize_override);
84 return 1;
85}
86__setup("cachesize=", cachesize_setup);
87
88int __cpuinit get_model_name(struct cpuinfo_x86 *c)
89{
90 unsigned int *v;
91 char *p, *q;
92
93 if (cpuid_eax(0x80000000) < 0x80000004)
94 return 0;
95
96 v = (unsigned int *) c->x86_model_id;
97 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
98 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
99 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
100 c->x86_model_id[48] = 0;
101
102 /* Intel chips right-justify this string for some dumb reason;
103 undo that brain damage */
104 p = q = &c->x86_model_id[0];
105 while ( *p == ' ' )
106 p++;
107 if ( p != q ) {
108 while ( *p )
109 *q++ = *p++;
110 while ( q <= &c->x86_model_id[48] )
111 *q++ = '\0'; /* Zero-pad the rest */
112 }
113
114 return 1;
115}
116
117
118void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
119{
120 unsigned int n, dummy, ecx, edx, l2size;
121
122 n = cpuid_eax(0x80000000);
123
124 if (n >= 0x80000005) {
125 cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
126 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
127 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
128 c->x86_cache_size=(ecx>>24)+(edx>>24);
129 }
130
131 if (n < 0x80000006) /* Some chips just has a large L1. */
132 return;
133
134 ecx = cpuid_ecx(0x80000006);
135 l2size = ecx >> 16;
136
137 /* do processor-specific cache resizing */
138 if (this_cpu->c_size_cache)
139 l2size = this_cpu->c_size_cache(c,l2size);
140
141 /* Allow user to override all this if necessary. */
142 if (cachesize_override != -1)
143 l2size = cachesize_override;
144
145 if ( l2size == 0 )
146 return; /* Again, no L2 cache is possible */
147
148 c->x86_cache_size = l2size;
149
150 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
151 l2size, ecx & 0xFF);
152}
153
154/* Naming convention should be: <Name> [(<Codename>)] */
155/* This table only is used unless init_<vendor>() below doesn't set it; */
156/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
157
158/* Look up CPU names by table lookup. */
159static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
160{
161 struct cpu_model_info *info;
162
163 if ( c->x86_model >= 16 )
164 return NULL; /* Range check */
165
166 if (!this_cpu)
167 return NULL;
168
169 info = this_cpu->c_models;
170
171 while (info && info->family) {
172 if (info->family == c->x86)
173 return info->model_names[c->x86_model];
174 info++;
175 }
176 return NULL; /* Not found */
177}
178
179
180static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
181{
182 char *v = c->x86_vendor_id;
183 int i;
184 static int printed;
185
186 for (i = 0; i < X86_VENDOR_NUM; i++) {
187 if (cpu_devs[i]) {
188 if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
189 (cpu_devs[i]->c_ident[1] &&
190 !strcmp(v,cpu_devs[i]->c_ident[1]))) {
191 c->x86_vendor = i;
192 if (!early)
193 this_cpu = cpu_devs[i];
194 return;
195 }
196 }
197 }
198 if (!printed) {
199 printed++;
200 printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
201 printk(KERN_ERR "CPU: Your system may be unstable.\n");
202 }
203 c->x86_vendor = X86_VENDOR_UNKNOWN;
204 this_cpu = &default_cpu;
205}
206
207
208static int __init x86_fxsr_setup(char * s)
209{
210 /* Tell all the other CPU's to not use it... */
211 disable_x86_fxsr = 1;
212
213 /*
214 * ... and clear the bits early in the boot_cpu_data
215 * so that the bootup process doesn't try to do this
216 * either.
217 */
218 clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
219 clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
220 return 1;
221}
222__setup("nofxsr", x86_fxsr_setup);
223
224
225static int __init x86_sep_setup(char * s)
226{
227 disable_x86_sep = 1;
228 return 1;
229}
230__setup("nosep", x86_sep_setup);
231
232
233/* Standard macro to see if a specific flag is changeable */
/*
 * Toggle @flag in EFLAGS and read it back: pre-CPUID parts (386) keep
 * bits like AC and ID pinned, so "can I flip it?" identifies the CPU
 * generation.  Returns nonzero when the flag is changeable.
 * The exact push/pop sequence matters; do not reorder.
 */
234static inline int flag_is_changeable_p(u32 flag)
235{
236	u32 f1, f2;
237
238	asm("pushfl\n\t"
239	    "pushfl\n\t"
240	    "popl %0\n\t"
241	    "movl %0,%1\n\t"
242	    "xorl %2,%0\n\t"
243	    "pushl %0\n\t"
244	    "popfl\n\t"
245	    "pushfl\n\t"
246	    "popl %0\n\t"
247	    "popfl\n\t"
248	    : "=&r" (f1), "=&r" (f2)
249	    : "ir" (flag));
250
	/* Flag changed iff it differs between the two EFLAGS snapshots. */
251	return ((f1^f2) & flag) != 0;
252}
253
254
255/* Probe for the CPUID instruction */
256static int __cpuinit have_cpuid_p(void)
257{
258 return flag_is_changeable_p(X86_EFLAGS_ID);
259}
260
261void __init cpu_detect(struct cpuinfo_x86 *c)
262{
263 /* Get vendor name */
264 cpuid(0x00000000, &c->cpuid_level,
265 (int *)&c->x86_vendor_id[0],
266 (int *)&c->x86_vendor_id[8],
267 (int *)&c->x86_vendor_id[4]);
268
269 c->x86 = 4;
270 if (c->cpuid_level >= 0x00000001) {
271 u32 junk, tfms, cap0, misc;
272 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
273 c->x86 = (tfms >> 8) & 15;
274 c->x86_model = (tfms >> 4) & 15;
275 if (c->x86 == 0xf)
276 c->x86 += (tfms >> 20) & 0xff;
277 if (c->x86 >= 0x6)
278 c->x86_model += ((tfms >> 16) & 0xF) << 4;
279 c->x86_mask = tfms & 15;
280 if (cap0 & (1<<19))
281 c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
282 }
283}
284
285/* Do minimum CPU detection early.
286 Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
287 The others are not touched to avoid unwanted side effects.
288
289 WARNING: this function is only called on the BP. Don't add code here
290 that is supposed to run on all CPUs. */
291static void __init early_cpu_detect(void)
292{
293 struct cpuinfo_x86 *c = &boot_cpu_data;
294
295 c->x86_cache_alignment = 32;
296
297 if (!have_cpuid_p())
298 return;
299
300 cpu_detect(c);
301
302 get_cpu_vendor(c, 1);
303}
304
/*
 * Vendor-independent CPUID identification: vendor string, feature
 * capability words, family/model/stepping, APIC id, and the extended
 * (AMD-defined) flag words.  Vendor c_init hooks may override any of
 * this afterwards.
 */
305static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
306{
307	u32 tfms, xlvl;
308	int ebx;
309
310	if (have_cpuid_p()) {
311		/* Get vendor name */
312		cpuid(0x00000000, &c->cpuid_level,
313		      (int *)&c->x86_vendor_id[0],
314		      (int *)&c->x86_vendor_id[8],
315		      (int *)&c->x86_vendor_id[4]);
316
317		get_cpu_vendor(c, 0);
318		/* Initialize the standard set of capabilities */
319		/* Note that the vendor-specific code below might override */
320
321		/* Intel-defined flags: level 0x00000001 */
322		if ( c->cpuid_level >= 0x00000001 ) {
323			u32 capability, excap;
324			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
325			c->x86_capability[0] = capability;
326			c->x86_capability[4] = excap;
327			c->x86 = (tfms >> 8) & 15;
328			c->x86_model = (tfms >> 4) & 15;
			/* Extended family/model encoding, family 0xf+. */
329			if (c->x86 == 0xf)
330				c->x86 += (tfms >> 20) & 0xff;
331			if (c->x86 >= 0x6)
332				c->x86_model += ((tfms >> 16) & 0xF) << 4;
333			c->x86_mask = tfms & 15;
334#ifdef CONFIG_X86_HT
335			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
336#else
337			c->apicid = (ebx >> 24) & 0xFF;
338#endif
			/* Bit 19 = CLFLUSH; EBX[15:8] is line size in qwords. */
339			if (c->x86_capability[0] & (1<<19))
340				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
341		} else {
342			/* Have CPUID level 0 only - unheard of */
343			c->x86 = 4;
344		}
345
346		/* AMD-defined flags: level 0x80000001 */
347		xlvl = cpuid_eax(0x80000000);
348		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
349			if ( xlvl >= 0x80000001 ) {
350				c->x86_capability[1] = cpuid_edx(0x80000001);
351				c->x86_capability[6] = cpuid_ecx(0x80000001);
352			}
353			if ( xlvl >= 0x80000004 )
354				get_model_name(c); /* Default name */
355		}
356
357		init_scattered_cpuid_features(c);
358	}
359
360	early_intel_workaround(c);
361
362#ifdef CONFIG_X86_HT
363	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
364#endif
365}
366
367static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
368{
369 if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
370 /* Disable processor serial number */
371 unsigned long lo,hi;
372 rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
373 lo |= 0x200000;
374 wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
375 printk(KERN_NOTICE "CPU serial number disabled.\n");
376 clear_bit(X86_FEATURE_PN, c->x86_capability);
377
378 /* Disabling the serial number may affect the cpuid level */
379 c->cpuid_level = cpuid_eax(0);
380 }
381}
382
383static int __init x86_serial_nr_setup(char *s)
384{
385 disable_x86_serial_nr = 0;
386 return 1;
387}
388__setup("serialnumber", x86_serial_nr_setup);
389
390
391
392/*
393 * This does the hard work of actually picking apart the CPU stuff...
 *
 * Pipeline: reset *c to "unknown" -> generic CPUID identify -> vendor
 * c_identify/c_init hooks -> command-line feature disables -> model
 * name fallback -> fold flags into boot_cpu_data -> MCE init.
 * The ordering is load-bearing; vendor hooks rely on the generic pass.
394 */
395static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
396{
397	int i;
398
399	c->loops_per_jiffy = loops_per_jiffy;
400	c->x86_cache_size = -1;
401	c->x86_vendor = X86_VENDOR_UNKNOWN;
402	c->cpuid_level = -1;	/* CPUID not detected */
403	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
404	c->x86_vendor_id[0] = '\0'; /* Unset */
405	c->x86_model_id[0] = '\0';  /* Unset */
406	c->x86_max_cores = 1;
407	c->x86_clflush_size = 32;
408	memset(&c->x86_capability, 0, sizeof c->x86_capability);
409
410	if (!have_cpuid_p()) {
411		/* First of all, decide if this is a 486 or higher */
412		/* It's a 486 if we can modify the AC flag */
413		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
414			c->x86 = 4;
415		else
416			c->x86 = 3;
417	}
418
419	generic_identify(c);
420
421	printk(KERN_DEBUG "CPU: After generic identify, caps:");
422	for (i = 0; i < NCAPINTS; i++)
423		printk(" %08lx", c->x86_capability[i]);
424	printk("\n");
425
426	if (this_cpu->c_identify) {
427		this_cpu->c_identify(c);
428
429		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
430		for (i = 0; i < NCAPINTS; i++)
431			printk(" %08lx", c->x86_capability[i]);
432		printk("\n");
433	}
434
435	/*
436	 * Vendor-specific initialization. In this section we
437	 * canonicalize the feature flags, meaning if there are
438	 * features a certain CPU supports which CPUID doesn't
439	 * tell us, CPUID claiming incorrect flags, or other bugs,
440	 * we handle them here.
441	 *
442	 * At the end of this section, c->x86_capability better
443	 * indicate the features this CPU genuinely supports!
444	 */
445	if (this_cpu->c_init)
446		this_cpu->c_init(c);
447
448	/* Disable the PN if appropriate */
449	squash_the_stupid_serial_number(c);
450
451	/*
452	 * The vendor-specific functions might have changed features. Now
453	 * we do "generic changes."
454	 */
455
456	/* TSC disabled? */
457	if ( tsc_disable )
458		clear_bit(X86_FEATURE_TSC, c->x86_capability);
459
460	/* FXSR disabled? */
461	if (disable_x86_fxsr) {
462		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
463		clear_bit(X86_FEATURE_XMM, c->x86_capability);
464	}
465
466	/* SEP disabled? */
467	if (disable_x86_sep)
468		clear_bit(X86_FEATURE_SEP, c->x86_capability);
469
470	if (disable_pse)
471		clear_bit(X86_FEATURE_PSE, c->x86_capability);
472
473	/* If the model name is still unset, do table lookup. */
474	if ( !c->x86_model_id[0] ) {
475		char *p;
476		p = table_lookup_model(c);
477		if ( p )
478			strcpy(c->x86_model_id, p);
479		else
480			/* Last resort... */
481			sprintf(c->x86_model_id, "%02x/%02x",
482				c->x86, c->x86_model);
483	}
484
485	/* Now the feature flags better reflect actual CPU features! */
486
487	printk(KERN_DEBUG "CPU: After all inits, caps:");
488	for (i = 0; i < NCAPINTS; i++)
489		printk(" %08lx", c->x86_capability[i]);
490	printk("\n");
491
492	/*
493	 * On SMP, boot_cpu_data holds the common feature set between
494	 * all CPUs; so make sure that we indicate which features are
495	 * common between the CPUs. The first time this routine gets
496	 * executed, c == &boot_cpu_data.
497	 */
498	if ( c != &boot_cpu_data ) {
499		/* AND the already accumulated flags with these */
500		for ( i = 0 ; i < NCAPINTS ; i++ )
501			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
502	}
503
504	/* Init Machine Check Exception if available. */
505	mcheck_init(c);
506}
507
/*
 * Identify the boot CPU and bring up boot-CPU-only facilities
 * (sysenter vsyscall page, SEP, MTRR state).  Call order is fixed:
 * sysenter_setup() must precede enable_sep_cpu().
 */
508void __init identify_boot_cpu(void)
509{
510	identify_cpu(&boot_cpu_data);
511	sysenter_setup();
512	enable_sep_cpu();
513	mtrr_bp_init();
514}
515
/*
 * Identify a secondary (AP) CPU and replicate SEP/MTRR setup done for
 * the boot CPU.  Must never be handed boot_cpu_data itself.
 */
516void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
517{
518	BUG_ON(c == &boot_cpu_data);
519	identify_cpu(c);
520	enable_sep_cpu();
521	mtrr_ap_init();
522}
523
524#ifdef CONFIG_X86_HT
/*
 * Derive hyper-threading topology from CPUID leaf 1: sets the global
 * smp_num_siblings and this CPU's phys_proc_id / cpu_core_id by
 * splitting the initial APIC id (EBX[31:24]) into package/core/thread
 * fields.  Assumes c->x86_max_cores was already filled in by the
 * vendor/cache code.
 */
525void __cpuinit detect_ht(struct cpuinfo_x86 *c)
526{
527	u32 eax, ebx, ecx, edx;
528	int index_msb, core_bits;
529
530	cpuid(1, &eax, &ebx, &ecx, &edx);
531
	/* CMP_LEGACY parts report siblings that are really cores. */
532	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
533		return;
534
535	smp_num_siblings = (ebx & 0xff0000) >> 16;
536
537	if (smp_num_siblings == 1) {
538		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
539	} else if (smp_num_siblings > 1 ) {
540
541		if (smp_num_siblings > NR_CPUS) {
542			printk(KERN_WARNING "CPU: Unsupported number of the "
543					"siblings %d", smp_num_siblings);
544			smp_num_siblings = 1;
545			return;
546		}
547
548		index_msb = get_count_order(smp_num_siblings);
549		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
550
551		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
552		       c->phys_proc_id);
553
		/* siblings-per-package -> threads-per-core. */
554		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
555
556		index_msb = get_count_order(smp_num_siblings) ;
557
558		core_bits = get_count_order(c->x86_max_cores);
559
560		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
561					       ((1 << core_bits) - 1);
562
563		if (c->x86_max_cores > 1)
564			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
565			       c->cpu_core_id);
566	}
567}
568#endif
569
570void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
571{
572 char *vendor = NULL;
573
574 if (c->x86_vendor < X86_VENDOR_NUM)
575 vendor = this_cpu->c_vendor;
576 else if (c->cpuid_level >= 0)
577 vendor = c->x86_vendor_id;
578
579 if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
580 printk("%s ", vendor);
581
582 if (!c->x86_model_id[0])
583 printk("%d86", c->x86);
584 else
585 printk("%s", c->x86_model_id);
586
587 if (c->x86_mask || c->cpuid_level >= 0)
588 printk(" stepping %02x\n", c->x86_mask);
589 else
590 printk("\n");
591}
592
/* Mask of CPUs that have completed cpu_init(); guards double init. */
593cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
594
595/* This is hacky. :)
596 * We're emulating future behavior.
597 * In the future, the cpu-specific init functions will be called implicitly
598 * via the magic of initcalls.
599 * They will insert themselves into the cpu_devs structure.
600 * Then, when cpu_init() is called, we can just iterate over that array.
601 */
602
/* Defined in the per-vendor files in this directory. */
603extern int intel_cpu_init(void);
604extern int cyrix_init_cpu(void);
605extern int nsc_init_cpu(void);
606extern int amd_init_cpu(void);
607extern int centaur_init_cpu(void);
608extern int transmeta_init_cpu(void);
609extern int nexgen_init_cpu(void);
610extern int umc_init_cpu(void);
611
/*
 * Register every vendor driver in cpu_devs[], then run the early boot
 * CPU probe.  Registration must happen before early_cpu_detect() so
 * the vendor match can succeed.
 */
612void __init early_cpu_init(void)
613{
614	intel_cpu_init();
615	cyrix_init_cpu();
616	nsc_init_cpu();
617	amd_init_cpu();
618	centaur_init_cpu();
619	transmeta_init_cpu();
620	nexgen_init_cpu();
621	umc_init_cpu();
622	early_cpu_detect();
623
624#ifdef CONFIG_DEBUG_PAGEALLOC
625	/* pse is not compatible with on-the-fly unmapping,
626	 * disable it even if the cpus claim to support it.
627	 */
628	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
629	disable_pse = 1;
630#endif
631}
632
633/* Make sure %fs is initialized properly in idle threads */
634struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
635{
636 memset(regs, 0, sizeof(struct pt_regs));
637 regs->xfs = __KERNEL_PERCPU;
638 return regs;
639}
640
641/* Current gdt points %fs at the "master" per-cpu area: after this,
642 * it's on the real one. */
/*
 * Load this CPU's own GDT and re-load %fs so per-cpu accesses hit the
 * real per-cpu area instead of the boot master copy.
 */
643void switch_to_new_gdt(void)
644{
645	struct Xgt_desc_struct gdt_descr;
646
647	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
648	gdt_descr.size = GDT_SIZE - 1;
649	load_gdt(&gdt_descr);
	/* Reload %fs from the new GDT's per-cpu selector. */
650	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
651}
652
653/*
654 * cpu_init() initializes state that is per-CPU. Some data is already
655 * initialized (naturally) in the bootstrap process, such as the GDT
656 * and IDT. We reload them nevertheless, this function acts as a
657 * 'CPU state barrier', nothing should get across.
658 */
659void __cpuinit cpu_init(void)
660{
661	int cpu = smp_processor_id();
662	struct task_struct *curr = current;
663	struct tss_struct * t = &per_cpu(init_tss, cpu);
664	struct thread_struct *thread = &curr->thread;
665
	/* Refuse to initialize the same CPU twice; spin with irqs on. */
666	if (cpu_test_and_set(cpu, cpu_initialized)) {
667		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
668		for (;;) local_irq_enable();
669	}
670
671	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
672
673	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
674		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
675	if (tsc_disable && cpu_has_tsc) {
676		printk(KERN_NOTICE "Disabling TSC...\n");
677		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
678		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
679		set_in_cr4(X86_CR4_TSD);
680	}
681
682	load_idt(&idt_descr);
683	switch_to_new_gdt();
684
685	/*
686	 * Set up and load the per-CPU TSS and LDT
687	 */
688	atomic_inc(&init_mm.mm_count);
689	curr->active_mm = &init_mm;
	/* The caller must be a kernel thread with no user mm. */
690	if (curr->mm)
691		BUG();
692	enter_lazy_tlb(&init_mm, curr);
693
694	load_esp0(t, thread);
695	set_tss_desc(cpu,t);
696	load_TR_desc();
697	load_LDT(&init_mm.context);
698
699#ifdef CONFIG_DOUBLEFAULT
700	/* Set up doublefault TSS pointer in the GDT */
701	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
702#endif
703
704	/* Clear %gs. */
705	asm volatile ("mov %0, %%gs" : : "r" (0));
706
707	/* Clear all 6 debug registers: */
708	set_debugreg(0, 0);
709	set_debugreg(0, 1);
710	set_debugreg(0, 2);
711	set_debugreg(0, 3);
712	set_debugreg(0, 6);
713	set_debugreg(0, 7);
714
715	/*
716	 * Force FPU initialization:
717	 */
718	current_thread_info()->status = 0;
719	clear_used_math();
720	mxcsr_feature_mask_init();
721}
722
723#ifdef CONFIG_HOTPLUG_CPU
/*
 * Undo cpu_init() for CPU hot-unplug: clear the initialized bit and
 * reset the lazy-TLB bookkeeping so a re-plugged CPU starts clean.
 */
724void __cpuinit cpu_uninit(void)
725{
726	int cpu = raw_smp_processor_id();
727	cpu_clear(cpu, cpu_initialized);
728
729	/* lazy TLB state */
730	per_cpu(cpu_tlbstate, cpu).state = 0;
731	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
732}
733#endif
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
new file mode 100644
index 000000000000..2f6432cef6ff
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -0,0 +1,28 @@
1
/* One row of a vendor's model-name table (see table_lookup_model()). */
2struct cpu_model_info {
3	int vendor;
4	int family;
	/* Names indexed by c->x86_model; max 16 models per family. */
5	char *model_names[16];
6};
7
8/* attempt to consolidate cpu attributes */
9struct cpu_dev {
10	char	* c_vendor;
11
12	/* some have two possibilities for cpuid string */
13	char	* c_ident[2];	
14
15	struct		cpu_model_info c_models[4];
16
	/* Vendor hooks, all optional: called from identify_cpu() and
	 * display_cacheinfo() when non-NULL. */
17	void            (*c_init)(struct cpuinfo_x86 * c);
18	void		(*c_identify)(struct cpuinfo_x86 * c);
19	unsigned int	(*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
20};
21
22extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
23
24extern int get_model_name(struct cpuinfo_x86 *c);
25extern void display_cacheinfo(struct cpuinfo_x86 *c);
26
27extern void early_intel_workaround(struct cpuinfo_x86 *c);
28
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
new file mode 100644
index 000000000000..122d2d75aa9f
--- /dev/null
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -0,0 +1,463 @@
1#include <linux/init.h>
2#include <linux/bitops.h>
3#include <linux/delay.h>
4#include <linux/pci.h>
5#include <asm/dma.h>
6#include <asm/io.h>
7#include <asm/processor-cyrix.h>
8#include <asm/timer.h>
9#include <asm/pci-direct.h>
10#include <asm/tsc.h>
11
12#include "cpu.h"
13
14/*
15 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
 *
 * Parts without DEVID registers are recognized by probing whether
 * CCR3 (and then CCR2) is writable, and get the synthetic dir0 values
 * 0xfd (old Cx486SLC/DLC) or 0xfe (Cx486S A step); dir1 stays as the
 * caller initialized it in that case.
16 */
17static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
18{
19	unsigned char ccr2, ccr3;
20	unsigned long flags;
21	
22	/* we test for DEVID by checking whether CCR3 is writable */
23	local_irq_save(flags);
24	ccr3 = getCx86(CX86_CCR3);
25	setCx86(CX86_CCR3, ccr3 ^ 0x80);
26	getCx86(0xc0); /* dummy to change bus */
27
28	if (getCx86(CX86_CCR3) == ccr3) { /* no DEVID regs. */
29		ccr2 = getCx86(CX86_CCR2);
30		setCx86(CX86_CCR2, ccr2 ^ 0x04);
31		getCx86(0xc0); /* dummy */
32
33		if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
34			*dir0 = 0xfd;
35		else { /* Cx486S A step */
36			setCx86(CX86_CCR2, ccr2);
37			*dir0 = 0xfe;
38		}
39	}
40	else {
41		setCx86(CX86_CCR3, ccr3); /* restore CCR3 */
42
43		/* read DIR0 and DIR1 CPU registers */
44		*dir0 = getCx86(CX86_DIR0);
45		*dir1 = getCx86(CX86_DIR1);
46	}
47	local_irq_restore(flags);
48}
49
50/*
51 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
52 * order to identify the Cyrix CPU model after we're out of setup.c
53 *
54 * Actually since bugs.h doesn't even reference this perhaps someone should
55 * fix the documentation ???
56 */
57static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
58
/* Family names indexed by DIR0[7:4] (see init_cyrix()). */
59static char Cx86_model[][9] __cpuinitdata = {
60	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
61	"M II ", "Unknown"
62};
63static char Cx486_name[][5] __cpuinitdata = {
64	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
65	"SRx2", "DRx2"
66};
67static char Cx486S_name[][4] __cpuinitdata = {
68	"S", "S2", "Se", "S2e"
69};
70static char Cx486D_name[][4] __cpuinitdata = {
71	"DX", "DX2", "?", "?", "?", "DX4"
72};
/* Scratch buffer: init_cyrix() patches the multiplier digit in place. */
73static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
/* Clock-multiplier digits indexed by the masked DIR0 low nibble. */
74static char cyrix_model_mult1[] __cpuinitdata = "12??43";
75static char cyrix_model_mult2[] __cpuinitdata = "12233445";
76
77/*
78 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
79 * BIOSes for compatibility with DOS games. This makes the udelay loop
80 * work correctly, and improves performance.
81 *
82 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
83 */
84
85extern void calibrate_delay(void) __init;
86
/* Only acts on the 6x86 family (Cx86_dir0_msb == 3); recalibrates
 * loops_per_jiffy when SLOP was found set. */
87static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
88{
89	unsigned long flags;
90	
91	if (Cx86_dir0_msb == 3) {
92		unsigned char ccr3, ccr5;
93
94		local_irq_save(flags);
95		ccr3 = getCx86(CX86_CCR3);
96		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
97		ccr5 = getCx86(CX86_CCR5);
98		if (ccr5 & 2)
99			setCx86(CX86_CCR5, ccr5 & 0xfd); /* reset SLOP */
100		setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
101		local_irq_restore(flags);
102
103		if (ccr5 & 2) { /* possible wrong calibration done */
104			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
105			calibrate_delay();
106			c->loops_per_jiffy = loops_per_jiffy;
107		}
108	}
109}
110
111
/*
 * Allow memory access reordering: clear the load/store-serialize bit
 * in PCR0 and widen the serialized region setting in CCR3.
 * Caller must hold irqs off (see geode_configure()).
 */
112static void __cpuinit set_cx86_reorder(void)
113{
114	u8 ccr3;
115
116	printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
117	ccr3 = getCx86(CX86_CCR3);
118	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
119
120	/* Load/Store Serialize to mem access disable (=reorder it)  */
121	setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
122	/* set load/store serialize from 1GB to 4GB */
123	ccr3 |= 0xe0;
124	setCx86(CX86_CCR3, ccr3);
125}
126
/*
 * Enable write-back caching: unlock the NW bit via CCR2, set CR0.NW,
 * then re-lock CCR2 with WT1 set.
 */
127static void __cpuinit set_cx86_memwb(void)
128{
129	u32 cr0;
130
131	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
132
133	/* CCR2 bit 2: unlock NW bit */
134	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
135	/* set 'Not Write-through' */
136	cr0 = 0x20000000;
137	write_cr0(read_cr0() | cr0);
138	/* CCR2 bit 2: lock NW bit and set WT1 */
139	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
140}
141
/*
 * Enable the "Incrementor" performance feature via PCR0/PCR1 behind
 * MAPEN.  Caller must hold irqs off (see geode_configure()).
 */
142static void __cpuinit set_cx86_inc(void)
143{
144	unsigned char ccr3;
145
146	printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
147
148	ccr3 = getCx86(CX86_CCR3);
149	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
150	/* PCR1 -- Performance Control */
151	/* Incrementor on, whatever that is */
152	setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
153	/* PCR0 -- Performance Control */
154	/* Incrementor Margin 10 */
155	setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
156	setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
157}
158
159/*
160 * Configure later MediaGX and/or Geode processor.
 * Enables suspend-on-halt, FPU fast mode, DTE cache and memory bypass,
 * then the reorder/write-back/incrementor features, all with irqs off.
161 */
162
163static void __cpuinit geode_configure(void)
164{
165	unsigned long flags;
166	u8 ccr3;
167	local_irq_save(flags);
168
169	/* Suspend on halt power saving and enable #SUSP pin */
170	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
171
172	ccr3 = getCx86(CX86_CCR3);
173	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
174
175
176	/* FPU fast, DTE cache, Mem bypass */
177	setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
178	setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
179
180	set_cx86_memwb();
181	set_cx86_reorder();
182	set_cx86_inc();
183
184	local_irq_restore(flags);
185}
186
187
/*
 * Vendor c_init hook for Cyrix parts: decode DIR0/DIR1 into
 * family/model/stepping, apply per-family quirks, and build the model
 * name string.  The per-family recipes below are black magic taken
 * from Cyrix documentation; do not "simplify" the masks.
 */
188static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
189{
190	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
191	char *buf = c->x86_model_id;
192	const char *p = NULL;
193
194	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
195	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
196	clear_bit(0*32+31, c->x86_capability);
197
198	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
199	if ( test_bit(1*32+24, c->x86_capability) ) {
200		clear_bit(1*32+24, c->x86_capability);
201		set_bit(X86_FEATURE_CXMMX, c->x86_capability);
202	}
203
204	do_cyrix_devid(&dir0, &dir1);
205
206	check_cx686_slop(c);
207
208	Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family" */
209	dir0_lsn = dir0 & 0xf; /* model or clock multiplier */
210
211	/* common case step number/rev -- exceptions handled below */
212	c->x86_model = (dir1 >> 4) + 1;
213	c->x86_mask = dir1 & 0xf;
214
215	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
216	 * We do the same thing for each generation: we work out
217	 * the model, multiplier and stepping. Black magic included,
218	 * to make the silicon step/rev numbers match the printed ones.
219	 */
220
221	switch (dir0_msn) {
222		unsigned char tmp;
223
224	case 0: /* Cx486SLC/DLC/SRx/DRx */
225		p = Cx486_name[dir0_lsn & 7];
226		break;
227
228	case 1: /* Cx486S/DX/DX2/DX4 */
229		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
230			: Cx486S_name[dir0_lsn & 3];
231		break;
232
233	case 2: /* 5x86 */
234		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
235		p = Cx86_cb+2;
236		break;
237
238	case 3: /* 6x86/6x86L */
239		Cx86_cb[1] = ' ';
240		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
241		if (dir1 > 0x21) { /* 686L */
242			Cx86_cb[0] = 'L';
243			p = Cx86_cb;
244			(c->x86_model)++;
245		} else /* 686 */
246			p = Cx86_cb+1;
247		/* Emulate MTRRs using Cyrix's ARRs. */
248		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
249		/* 6x86's contain this bug */
250		c->coma_bug = 1;
251		break;
252
253	case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
254#ifdef CONFIG_PCI
255	{
256		u32 vendor, device;
257		/* It isn't really a PCI quirk directly, but the cure is the
258		   same. The MediaGX has deep magic SMM stuff that handles the
259		   SB emulation. It thows away the fifo on disable_dma() which
260		   is wrong and ruins the audio.
261
262		   Bug2: VSA1 has a wrap bug so that using maximum sized DMA
263		   causes bad things. According to NatSemi VSA2 has another
264		   bug to do with 'hlt'. I've not seen any boards using VSA2
265		   and X doesn't seem to support it either so who cares 8).
266		   VSA1 we work around however.
267		*/
268
269		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
270		isa_dma_bridge_buggy = 2;
271
272		/* We do this before the PCI layer is running. However we
273		   are safe here as we know the bridge must be a Cyrix
274		   companion and must be present */
275		vendor = read_pci_config_16(0, 0, 0x12, PCI_VENDOR_ID);
276		device = read_pci_config_16(0, 0, 0x12, PCI_DEVICE_ID);
277
278		/*
279		 * The 5510/5520 companion chips have a funky PIT.
280		 */
281		if (vendor == PCI_VENDOR_ID_CYRIX &&
282			(device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520))
283			mark_tsc_unstable("cyrix 5510/5520 detected");
284	}
285#endif
286		c->x86_cache_size=16;	/* Yep 16K integrated cache thats it */
287
288		/* GXm supports extended cpuid levels 'ala' AMD */
289		if (c->cpuid_level == 2) {
290			/* Enable cxMMX extensions (GX1 Datasheet 54) */
291			setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
292
293			/*
294			 * GXm : 0x30 ... 0x5f GXm datasheet 51
295			 * GXlv: 0x6x GXlv datasheet 54
296			 * ? : 0x7x
297			 * GX1 : 0x8x GX1 datasheet 56
298			 */
299			if((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <=dir1 && dir1 <= 0x8f))
300				geode_configure();
301			get_model_name(c); /* get CPU marketing name */
302			return;
303		}
304		else { /* MediaGX */
305			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
306			p = Cx86_cb+2;
307			c->x86_model = (dir1 & 0x20) ? 1 : 2;
308		}
309		break;
310
311	case 5: /* 6x86MX/M II */
312		if (dir1 > 7)
313		{
314			dir0_msn++; /* M II */
315			/* Enable MMX extensions (App note 108) */
316			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
317		}
318		else
319		{
320			c->coma_bug = 1; /* 6x86MX, it has the bug. */
321		}
322		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
323		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
324		p = Cx86_cb+tmp;
325		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
326			(c->x86_model)++;
327		/* Emulate MTRRs using Cyrix's ARRs. */
328		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
329		break;
330
331	case 0xf: /* Cyrix 486 without DEVID registers */
332		switch (dir0_lsn) {
333		case 0xd: /* either a 486SLC or DLC w/o DEVID */
334			dir0_msn = 0;
335			p = Cx486_name[(c->hard_math) ? 1 : 0];
336			break;
337
338		case 0xe: /* a 486S A step */
339			dir0_msn = 0;
340			p = Cx486S_name[0];
341			break;
342		}
343		break;
344
345	default: /* unknown (shouldn't happen, we know everyone ;-) */
346		dir0_msn = 7;
347		break;
348	}
349	strcpy(buf, Cx86_model[dir0_msn & 7]);
350	if (p) strcat(buf, p);
351	return;
352}
353
354/*
355 * Handle National Semiconductor branded processors
356 */
357static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
358{
359 /* There may be GX1 processors in the wild that are branded
360 * NSC and not Cyrix.
361 *
362 * This function only handles the GX processor, and kicks every
363 * thing else to the Cyrix init function above - that should
364 * cover any processors that might have been branded differently
365 * after NSC acquired Cyrix.
366 *
367 * If this breaks your GX1 horribly, please e-mail
368 * info-linux@ldcmail.amd.com to tell us.
369 */
370
371 /* Handle the GX (Formally known as the GX2) */
372
373 if (c->x86 == 5 && c->x86_model == 5)
374 display_cacheinfo(c);
375 else
376 init_cyrix(c);
377}
378
379/*
380 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
381 * by the fact that they preserve the flags across the division of 5/2.
382 * PII and PPro exhibit this behavior too, but they have cpuid available.
383 */
384
385/*
386 * Perform the Cyrix 5/2 test. A Cyrix won't change
387 * the flags, while other 486 chips will.
 * Returns nonzero when the CPU behaves like a Cyrix.
388 */
389static inline int test_cyrix_52div(void)
390{
391	unsigned int test;
392
393	__asm__ __volatile__(
394	     "sahf\n\t" /* clear flags (%eax = 0x0005) */
395	     "div %b2\n\t" /* divide 5 by 2 */
396	     "lahf" /* store flags into %ah */
397	     : "=a" (test)
398	     : "0" (5), "q" (2)
399	     : "cc");
400
401	/* AH is 0x02 on Cyrix after the divide.. */
402	return (unsigned char) (test >> 8) == 0x02;
403}
404
/*
 * Vendor c_identify hook: detect Cyrix parts shipped with CPUID
 * disabled, claim them as X86_VENDOR_CYRIX, and (on 5x86/6x86) turn
 * CPUID on via CCR4 so the rest of identification can proceed.
 */
405static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c)
406{
407	/* Detect Cyrix with disabled CPUID */
408	if ( c->x86 == 4 && test_cyrix_52div() ) {
409		unsigned char dir0, dir1;
410		
411		strcpy(c->x86_vendor_id, "CyrixInstead");
412		c->x86_vendor = X86_VENDOR_CYRIX;
413		
414		/* Actually enable cpuid on the older cyrix */
415
416		/* Retrieve CPU revisions */
417		
418		do_cyrix_devid(&dir0, &dir1);
419
420		dir0>>=4;
421		
422		/* Check it is an affected model */
423		
424		if (dir0 == 5 || dir0 == 3)
425		{
426			unsigned char ccr3;
427			unsigned long flags;
428			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
429			local_irq_save(flags);
430			ccr3 = getCx86(CX86_CCR3);
431			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
432			setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* enable cpuid */
433			setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
434			local_irq_restore(flags);
435		}
436	}
437}
438
/* Vendor driver registration record for Cyrix processors. */
439static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
440	.c_vendor	= "Cyrix",
441	.c_ident 	= { "CyrixInstead" },
442	.c_init		= init_cyrix,
443	.c_identify	= cyrix_identify,
444};
445
/* Hook the Cyrix driver into the vendor dispatch table. */
446int __init cyrix_init_cpu(void)
447{
448	cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
449	return 0;
450}
451
/* Vendor driver registration record for NSC-branded Geode parts. */
452static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
453	.c_vendor	= "NSC",
454	.c_ident 	= { "Geode by NSC" },
455	.c_init		= init_nsc,
456};
457
/* Hook the NSC driver into the vendor dispatch table. */
458int __init nsc_init_cpu(void)
459{
460	cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
461	return 0;
462}
463
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
new file mode 100644
index 000000000000..dc4e08147b1f
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel.c
@@ -0,0 +1,333 @@
1#include <linux/init.h>
2#include <linux/kernel.h>
3
4#include <linux/string.h>
5#include <linux/bitops.h>
6#include <linux/smp.h>
7#include <linux/thread_info.h>
8#include <linux/module.h>
9
10#include <asm/processor.h>
11#include <asm/msr.h>
12#include <asm/uaccess.h>
13
14#include "cpu.h"
15
16#ifdef CONFIG_X86_LOCAL_APIC
17#include <asm/mpspec.h>
18#include <asm/apic.h>
19#include <mach_apic.h>
20#endif
21
22extern int trap_init_f00f_bug(void);
23
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 * The per-model mask is set in init_intel() below; consumed by the
 * CONFIG_X86_INTEL_USERCOPY usercopy routines.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif
30
31void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
32{
33 if (c->x86_vendor != X86_VENDOR_INTEL)
34 return;
35 /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
36 if (c->x86 == 15 && c->x86_cache_alignment == 64)
37 c->x86_cache_alignment = 128;
38}
39
40/*
41 * Early probe support logic for ppro memory erratum #50
42 *
43 * This is called before we do cpu ident work
44 */
45
46int __cpuinit ppro_with_ram_bug(void)
47{
48 /* Uses data from early_cpu_detect now */
49 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
50 boot_cpu_data.x86 == 6 &&
51 boot_cpu_data.x86_model == 1 &&
52 boot_cpu_data.x86_mask < 8) {
53 printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
54 return 1;
55 }
56 return 0;
57}
58
59
60/*
61 * P4 Xeon errata 037 workaround.
62 * Hardware prefetcher may cause stale data to be loaded into the cache.
63 */
64static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
65{
66 unsigned long lo, hi;
67
68 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
69 rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
70 if ((lo & (1<<9)) == 0) {
71 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
72 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
73 lo |= (1<<9); /* Disable hw prefetching */
74 wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
75 }
76 }
77}
78
79
80/*
81 * find out the number of processor cores on the die
82 */
83static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
84{
85 unsigned int eax, ebx, ecx, edx;
86
87 if (c->cpuid_level < 4)
88 return 1;
89
90 /* Intel has a non-standard dependency on %ecx for this CPUID level. */
91 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
92 if (eax & 0x1f)
93 return ((eax >> 26) + 1);
94 else
95 return 1;
96}
97
/*
 * Per-CPU setup for Intel processors: applies the F00F workaround,
 * sizes the caches, fixes up bogus feature bits, derives legacy model
 * names from cache size, and sets model-specific quirk/feature flags.
 * Called from the common identify path via intel_cpu_dev.c_init.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;
	char *p = NULL;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled = 0;

		c->f00f_bug = 1;
		if ( !f00f_workaround_enabled ) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	select_idle_routine(c);
	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9 ) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
	}

	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	/* Names for the Pentium II/Celeron processors
	   detectable only by also checking the cache size.
	   Dixon is NOT a Celeron. */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if ( p )
		strcpy(c->x86_model_id, p);

	c->x86_max_cores = num_cpu_cores(c);

	detect_ht(c);

	/* Work around errata */
	Intel_errata_workarounds(c);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	if (c->x86 == 15) {
		set_bit(X86_FEATURE_P4, c->x86_capability);
		set_bit(X86_FEATURE_SYNC_RDTSC, c->x86_capability);
	}
	if (c->x86 == 6)
		set_bit(X86_FEATURE_P3, c->x86_capability);
	/* constant-rate TSC: family 0xf model >= 3, or family 6 model >= 0xe */
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);

	/* Debug Store: BTS/PEBS usable unless MISC_ENABLE bits 11/12 say not.
	   Note: this rdmsr reuses (clobbers) 'l2' as the MSR high word; all
	   cache-size uses of l2 are complete above. */
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}
}
210
211static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
212{
213 /* Intel PIII Tualatin. This comes in two flavours.
214 * One has 256kb of cache, the other 512. We have no way
215 * to determine which, so we use a boottime override
216 * for the 512kb model, and assume 256 otherwise.
217 */
218 if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
219 size = 256;
220 return size;
221}
222
/*
 * Vendor table for Intel: legacy model-name lookup for families 4-15
 * plus the init and cache-size fixup hooks.
 */
static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_init		= init_intel,
	.c_size_cache	= intel_size_cache,
};

/* Register the Intel vendor hooks with the common CPU identify code. */
__init int intel_cpu_init(void)
{
	cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
	return 0;
}
284
285#ifndef CONFIG_X86_CMPXCHG
286unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
287{
288 u8 prev;
289 unsigned long flags;
290
291 /* Poor man's cmpxchg for 386. Unsuitable for SMP */
292 local_irq_save(flags);
293 prev = *(u8 *)ptr;
294 if (prev == old)
295 *(u8 *)ptr = new;
296 local_irq_restore(flags);
297 return prev;
298}
299EXPORT_SYMBOL(cmpxchg_386_u8);
300
301unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
302{
303 u16 prev;
304 unsigned long flags;
305
306 /* Poor man's cmpxchg for 386. Unsuitable for SMP */
307 local_irq_save(flags);
308 prev = *(u16 *)ptr;
309 if (prev == old)
310 *(u16 *)ptr = new;
311 local_irq_restore(flags);
312 return prev;
313}
314EXPORT_SYMBOL(cmpxchg_386_u16);
315
316unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
317{
318 u32 prev;
319 unsigned long flags;
320
321 /* Poor man's cmpxchg for 386. Unsuitable for SMP */
322 local_irq_save(flags);
323 prev = *(u32 *)ptr;
324 if (prev == old)
325 *(u32 *)ptr = new;
326 local_irq_restore(flags);
327 return prev;
328}
329EXPORT_SYMBOL(cmpxchg_386_u32);
330#endif
331
332// arch_initcall(intel_cpu_init);
333
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
new file mode 100644
index 000000000000..db6c25aa5776
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -0,0 +1,806 @@
1/*
 2 * Routines to identify caches on Intel CPU.
3 *
4 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
8 */
9
10#include <linux/init.h>
11#include <linux/slab.h>
12#include <linux/device.h>
13#include <linux/compiler.h>
14#include <linux/cpu.h>
15#include <linux/sched.h>
16
17#include <asm/processor.h>
18#include <asm/smp.h>
19
/* Cache-level classification codes used by cache_table[] below. */
#define LVL_1_INST	1	/* level 1 instruction cache */
#define LVL_1_DATA	2	/* level 1 data cache */
#define LVL_2		3	/* level 2 cache */
#define LVL_3		4	/* level 3 cache */
#define LVL_TRACE	5	/* P4 trace cache */

/* One cpuid(2) cache descriptor: its level classification and size. */
struct _cache_table
{
	unsigned char descriptor;	/* raw descriptor byte from cpuid(2) */
	char cache_type;		/* one of the LVL_* codes above */
	short size;			/* size in KB */
};
32
/* all the cache descriptor types we care about (no TLB or trace cache entries)
   -- sizes are in KB; the table is terminated by a zero descriptor. */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },		/* 16-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}			/* terminator */
};
87
88
/* Cache type field of cpuid(4) EAX; CACHE_TYPE_NULL terminates the leaves. */
enum _cache_type
{
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

/* cpuid(4) EAX: cache type, level and sharing topology, bit-field view. */
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

/* cpuid(4) EBX: cache geometry; each field is stored as (value - 1). */
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

/* cpuid(4) ECX: number of sets, stored as (value - 1). */
union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

/* Decoded cpuid(4) data for one cache leaf of one CPU. */
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;		/* total size in bytes */
	cpumask_t shared_cpu_map;	/* CPUs sharing this cache */
};

/* Number of cpuid(4) cache leaves; probed once on the boot CPU. */
unsigned short			num_cache_leaves;
135
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */

/* cpuid(0x80000005) ECX/EDX layout: L1 data / L1 instruction cache. */
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};

/* cpuid(0x80000006) ECX layout: L2 cache. */
union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};

/* cpuid(0x80000006) EDX layout: L3 cache; size is in 512KB units. */
union l3_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned res : 2;
		unsigned size_encoded : 14;
	};
	unsigned val;
};

/* Map the AMD 4-bit associativity encoding to an actual way count. */
static const unsigned short		assocs[] = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff // ??
};

/* Cache level and cpuid(4) type code for emulated leaves 0..3
   (0 = L1D, 1 = L1I, 2 = L2, 3 = L3). */
static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };
182
/*
 * Synthesize a cpuid(4)-style answer for AMD CPUs from the
 * 0x80000005/0x80000006 extended leaves.  leaf 0 = L1D, 1 = L1I,
 * 2 = L2, 3 = L3; a leaf whose cache is absent leaves all three
 * outputs zeroed (i.e. CACHE_TYPE_NULL).
 */
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		       union _cpuid4_leaf_ebx *ebx,
		       union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through: L1I and L1D share the same field layout */
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	/* only the L3 is assumed shared between cores (see header comment) */
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;


	/* 0xf means fully associative in the AMD encoding */
	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	/* cpuid(4) convention: geometry fields hold (value - 1) */
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
251
/*
 * Fill *this_leaf with the decoded cpuid(4) data for cache leaf
 * 'index' of the current CPU (emulated on AMD).  Returns 0 on
 * success, -EIO when the leaf does not describe a cache.
 */
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		amd_cpuid4(index, &eax, &ebx, &ecx);
	else
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	/* geometry fields are stored as (value - 1), hence the +1s */
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
	return 0;
}
275
276static int __cpuinit find_num_cache_leaves(void)
277{
278 unsigned int eax, ebx, ecx, edx;
279 union _cpuid4_leaf_eax cache_eax;
280 int i = -1;
281
282 do {
283 ++i;
284 /* Do cpuid(4) loop to find out num_cache_leaves */
285 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
286 cache_eax.full = eax;
287 } while (cache_eax.split.type != CACHE_TYPE_NULL);
288 return i;
289}
290
/*
 * Determine the sizes of all caches on this CPU, preferring the
 * deterministic cpuid(4) leaf and falling back to the legacy cpuid(2)
 * descriptor table.  Prints the discovered sizes, fills in
 * c->x86_cache_size (and cpu_llc_id[] under CONFIG_X86_HT), and
 * returns the L2 size in KB for callers that name CPU models by it.
 */
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;

			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch(this_leaf.eax.split.level) {
				    case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				    case 2:
					new_l2 = this_leaf.size/1024;
					/* the cache id is the APIC id shifted
					   past the sharing-domain bits */
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				    case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				    default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
				{
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	/* cpuid(4) results, when available, override the cpuid(2) sums */
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		cpu_llc_id[cpu] = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		/* an L3, when present, supersedes L2 as the last-level cache */
		cpu_llc_id[cpu] = l3_id;
#endif
	}

	if (trace)
		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if ( l1i )
		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	/* report the largest cache level present as the CPU's cache size */
	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
451
/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
/* leaf 'y' of cpu 'x' */
#define CPUID4_INFO_IDX(x,y)    (&((cpuid4_info[x])[y]))
455
#ifdef CONFIG_SMP
/*
 * Record which CPUs share the cache at 'index' with 'cpu'.
 * cpuid(4)'s num_threads_sharing gives the sharing domain width;
 * CPUs whose APIC IDs agree above that bit boundary share the cache.
 * Already-initialized siblings also get this cpu back-filled into
 * their maps.
 */
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = cpu_data;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (c[i].apicid >> index_msb ==
			    c[cpu].apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && cpuid4_info[i]) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}

/* Remove 'cpu' from the shared maps of all its cache siblings. */
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
/* UP: nothing shares a cache, so these are no-ops. */
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
499
/* Release the per-CPU leaf array allocated by detect_cache_attributes(). */
static void free_cache_attributes(unsigned int cpu)
{
	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}
505
/*
 * Allocate and populate cpuid4_info[cpu] with one _cpuid4_info per
 * cache leaf.  cpuid must execute on the target CPU, so the current
 * task is migrated there via set_cpus_allowed() and restored after.
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is freed again.
 */
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long		j;
	int			retval;
	cpumask_t		oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (cpuid4_info[cpu] == NULL)
		return -ENOMEM;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	retval = 0;
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0))
			break;
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed(current, oldmask);

out:
	if (retval)
		free_cache_attributes(cpu);
	return retval;
}
542
543#ifdef CONFIG_SYSFS
544
545#include <linux/kobject.h>
546#include <linux/sysfs.h>
547
extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject * cache_kobject[NR_CPUS];

/* One sysfs directory cpuX/cache/indexY: ties a kobject to (cpu, leaf). */
struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
/* leaf 'y' of cpu 'x' */
#define INDEX_KOBJECT_PTR(x,y)    (&((index_kobject[x])[y]))

/*
 * Generate a show_<name>() sysfs handler printing one cpuid(4)
 * bit-field plus 'val' (1 for geometry fields, which the hardware
 * stores as value - 1; 0 for fields stored as-is).
 */
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
575
/* sysfs "size" attribute: total cache size in KB. */
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
}

/* sysfs "shared_cpu_map" attribute: hex mask of CPUs sharing this cache. */
static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
	char mask_str[NR_CPUS];
	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
}
587
588static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
589 switch(this_leaf->eax.split.type) {
590 case CACHE_TYPE_DATA:
591 return sprintf(buf, "Data\n");
592 break;
593 case CACHE_TYPE_INST:
594 return sprintf(buf, "Instruction\n");
595 break;
596 case CACHE_TYPE_UNIFIED:
597 return sprintf(buf, "Unified\n");
598 break;
599 default:
600 return sprintf(buf, "Unknown\n");
601 break;
602 }
603}
604
/* sysfs attribute carrying typed show/store on a _cpuid4_info leaf. */
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

/* Declare a read-only (0444) cache attribute backed by show_<name>(). */
#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);

/* Files created in every cpuX/cache/indexY directory. */
static struct attribute * default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)
638
639static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
640{
641 struct _cache_attr *fattr = to_attr(attr);
642 struct _index_kobject *this_leaf = to_object(kobj);
643 ssize_t ret;
644
645 ret = fattr->show ?
646 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
647 buf) :
648 0;
649 return ret;
650}
651
/*
 * Generic store handler.  All attributes here are read-only (0444),
 * so sysfs never invokes this.
 * NOTE(review): returning 0 from a sysfs store makes a writer loop
 * forever; harmless while nothing is writable — revisit before
 * adding writable attributes.
 */
static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

/* kobj_type for the indexY directories: carries the attribute files. */
static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

/* kobj_type for the bare cpuX/cache directory: no attributes of its own. */
static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
671
/* Free all sysfs bookkeeping and cached cpuid(4) data for one CPU. */
static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
}
680
/*
 * Detect this CPU's caches and allocate the kobjects needed to expose
 * them under sysfs.  Returns 0 on success, -ENOENT when there is
 * nothing to expose, -ENOMEM on allocation failure (with everything
 * cleaned up via cpuid4_cache_sysfs_exit()).
 */
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{

	if (num_cache_leaves == 0)
		return -ENOENT;

	detect_cache_attributes(cpu);
	/* detect_cache_attributes() failure leaves cpuid4_info[cpu] NULL */
	if (cpuid4_info[cpu] == NULL)
		return -ENOENT;

	/* Allocate all required memory */
	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;

	index_kobject[cpu] = kzalloc(
	    sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
707
708/* Add/Remove cache interface for CPU device */
709static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
710{
711 unsigned int cpu = sys_dev->id;
712 unsigned long i, j;
713 struct _index_kobject *this_object;
714 int retval = 0;
715
716 retval = cpuid4_cache_sysfs_init(cpu);
717 if (unlikely(retval < 0))
718 return retval;
719
720 cache_kobject[cpu]->parent = &sys_dev->kobj;
721 kobject_set_name(cache_kobject[cpu], "%s", "cache");
722 cache_kobject[cpu]->ktype = &ktype_percpu_entry;
723 retval = kobject_register(cache_kobject[cpu]);
724
725 for (i = 0; i < num_cache_leaves; i++) {
726 this_object = INDEX_KOBJECT_PTR(cpu,i);
727 this_object->cpu = cpu;
728 this_object->index = i;
729 this_object->kobj.parent = cache_kobject[cpu];
730 kobject_set_name(&(this_object->kobj), "index%1lu", i);
731 this_object->kobj.ktype = &ktype_cache;
732 retval = kobject_register(&(this_object->kobj));
733 if (unlikely(retval)) {
734 for (j = 0; j < i; j++) {
735 kobject_unregister(
736 &(INDEX_KOBJECT_PTR(cpu,j)->kobj));
737 }
738 kobject_unregister(cache_kobject[cpu]);
739 cpuid4_cache_sysfs_exit(cpu);
740 break;
741 }
742 }
743 return retval;
744}
745
746static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
747{
748 unsigned int cpu = sys_dev->id;
749 unsigned long i;
750
751 if (cpuid4_info[cpu] == NULL)
752 return;
753 for (i = 0; i < num_cache_leaves; i++) {
754 cache_remove_shared_cpu_map(cpu, i);
755 kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
756 }
757 kobject_unregister(cache_kobject[cpu]);
758 cpuid4_cache_sysfs_exit(cpu);
759 return;
760}
761
/*
 * CPU hotplug notifier: create the cache sysfs tree when a CPU comes
 * online and tear it down when it goes away.
 */
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}
781
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
	.notifier_call = cacheinfo_cpu_callback,
};

/*
 * Module init: register the hotplug notifier, then synthesize a
 * CPU_ONLINE event for every CPU already up so their sysfs entries
 * get created too.
 */
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	register_hotcpu_notifier(&cacheinfo_cpu_notifier);

	for_each_online_cpu(i) {
		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
			(void *)(long)i);
	}

	return 0;
}

device_initcall(cache_sysfs_init);
805
806#endif
diff --git a/arch/x86/kernel/cpu/nexgen.c b/arch/x86/kernel/cpu/nexgen.c
new file mode 100644
index 000000000000..961fbe1a748f
--- /dev/null
+++ b/arch/x86/kernel/cpu/nexgen.c
@@ -0,0 +1,60 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/string.h>
4#include <asm/processor.h>
5
6#include "cpu.h"
7
8/*
9 * Detect a NexGen CPU running without BIOS hypercode new enough
10 * to have CPUID. (Thanks to Herbert Oppmann)
11 */
12
/*
 * Probe for a NexGen CPU whose BIOS hypercode is too old to provide
 * CPUID: perform a 16-bit division and branch on the resulting flags.
 * Returns 1 when the flag behaviour matches NexGen, 0 otherwise.
 * NOTE(review): this depends on vendor-specific flag side effects of
 * DIVW — do not "clean up" the asm without hardware to verify against.
 */
13static int __cpuinit deep_magic_nexgen_probe(void)
14{
15 int ret;
16
17 __asm__ __volatile__ (
18 " movw $0x5555, %%ax\n"
19 " xorw %%dx,%%dx\n"
20 " movw $2, %%cx\n"
21 " divw %%cx\n"
22 " movl $0, %%eax\n"
23 " jnz 1f\n"
24 " movl $1, %%eax\n"
25 "1:\n"
26 : "=a" (ret) : : "cx", "dx" );
27 return ret;
28}
29
30static void __cpuinit init_nexgen(struct cpuinfo_x86 * c)
31{
32 c->x86_cache_size = 256; /* A few had 1 MB... */
33}
34
35static void __cpuinit nexgen_identify(struct cpuinfo_x86 * c)
36{
37 /* Detect NexGen with old hypercode */
38 if ( deep_magic_nexgen_probe() ) {
39 strcpy(c->x86_vendor_id, "NexGenDriven");
40 }
41}
42
/*
 * Vendor descriptor: NexGen made only family-5 parts; model 1 is the
 * Nx586.  c_identify supplements normal CPUID detection with the
 * deep-magic probe for chips running pre-CPUID hypercode.
 */
43static struct cpu_dev nexgen_cpu_dev __cpuinitdata = {
44 .c_vendor = "Nexgen",
45 .c_ident = { "NexGenDriven" },
46 .c_models = {
47 { .vendor = X86_VENDOR_NEXGEN,
48 .family = 5,
49 .model_names = { [1] = "Nx586" }
50 },
51 },
52 .c_init = init_nexgen,
53 .c_identify = nexgen_identify,
54};
55
/* Hook this vendor's cpu_dev into the global dispatch table. */
56int __init nexgen_init_cpu(void)
57{
58 cpu_devs[X86_VENDOR_NEXGEN] = &nexgen_cpu_dev;
59 return 0;
60}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
new file mode 100644
index 000000000000..93fecd4b03de
--- /dev/null
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -0,0 +1,713 @@
1/* local apic based NMI watchdog for various CPUs.
2 This file also handles reservation of performance counters for coordination
3 with other users (like oprofile).
4
5 Note that these events normally don't tick when the CPU idles. This means
6 the frequency varies with CPU load.
7
8 Original code for K7/P6 written by Keith Owens */
9
10#include <linux/percpu.h>
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/bitops.h>
14#include <linux/smp.h>
15#include <linux/nmi.h>
16#include <asm/apic.h>
17#include <asm/intel_arch_perfmon.h>
18
/* Per-CPU state describing which MSRs this CPU's watchdog uses. */
19struct nmi_watchdog_ctlblk {
20 unsigned int cccr_msr;
21 unsigned int perfctr_msr; /* the MSR to reset in NMI handler */
22 unsigned int evntsel_msr; /* the MSR to select the events to handle */
23};
24
25/* Interface defining a CPU specific perfctr watchdog */
26struct wd_ops {
27 int (*reserve)(void);
28 void (*unreserve)(void);
29 int (*setup)(unsigned nmi_hz);
30 void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
31 void (*stop)(void);
32 unsigned perfctr;
33 unsigned evntsel;
34 u64 checkbit;
35};
36
/* Selected once at boot by probe_nmi_watchdog(); NULL = unsupported CPU. */
37static struct wd_ops *wd_ops;
38
39/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
40 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
41 */
42#define NMI_MAX_COUNTER_BITS 66
43
44/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
45 * evtsel_nmi_owner tracks the ownership of the event selection
46 * - different performance counters/ event selection may be reserved for
47 * different subsystems this reservation system just tries to coordinate
48 * things a little
49 */
50static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
51static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
52
/* One control block per CPU; filled in by the setup_*_watchdog hooks. */
53static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
54
55/* converts an msr to an appropriate reservation bit */
56static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
57{
58 /* returns the bit offset of the performance counter register */
59 switch (boot_cpu_data.x86_vendor) {
60 case X86_VENDOR_AMD:
61 return (msr - MSR_K7_PERFCTR0);
62 case X86_VENDOR_INTEL:
63 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
64 return (msr - MSR_ARCH_PERFMON_PERFCTR0);
65
66 switch (boot_cpu_data.x86) {
67 case 6:
68 return (msr - MSR_P6_PERFCTR0);
69 case 15:
70 return (msr - MSR_P4_BPU_PERFCTR0);
71 }
72 }
73 return 0;
74}
75
76/* converts an msr to an appropriate reservation bit */
77/* (counterpart of nmi_perfctr_msr_to_bit for event-select MSRs) */
78static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
79{
80 /* bit index = offset from the vendor's first event-select MSR */
81 switch (boot_cpu_data.x86_vendor) {
82 case X86_VENDOR_AMD:
83 return (msr - MSR_K7_EVNTSEL0);
84 case X86_VENDOR_INTEL:
85 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
86 return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
87
88 switch (boot_cpu_data.x86) {
89 case 6:
90 return (msr - MSR_P6_EVNTSEL0);
91 case 15:
92 return (msr - MSR_P4_BSU_ESCR0);
93 }
94 }
95 return 0;
96
97}
98
99/* checks for a bit availability (hack for oprofile) */
100int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
101{
 /* NMI_MAX_COUNTER_BITS is an inclusive upper bound (the P4 CRU_ESCR5
  * offset), hence '>' rather than '>=' here and in the helpers below. */
102 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
103
104 return (!test_bit(counter, perfctr_nmi_owner));
105}
106
107/* checks whether an msr's counter bit is still available */
108int avail_to_resrv_perfctr_nmi(unsigned int msr)
109{
110 unsigned int counter;
111
112 counter = nmi_perfctr_msr_to_bit(msr);
113 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
114
115 return (!test_bit(counter, perfctr_nmi_owner));
116}
117
118int reserve_perfctr_nmi(unsigned int msr)
119{
120 unsigned int counter;
121
122 counter = nmi_perfctr_msr_to_bit(msr);
123 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
124
125 if (!test_and_set_bit(counter, perfctr_nmi_owner))
126 return 1;
127 return 0;
128}
129
/* Drop ownership of a previously reserved perfctr MSR. */
130void release_perfctr_nmi(unsigned int msr)
131{
132 unsigned int counter;
133
134 counter = nmi_perfctr_msr_to_bit(msr);
135 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
136
137 clear_bit(counter, perfctr_nmi_owner);
138}
139
/* Claim an event-select MSR; returns 1 on success, 0 when taken. */
140int reserve_evntsel_nmi(unsigned int msr)
141{
142 unsigned int counter;
143
144 counter = nmi_evntsel_msr_to_bit(msr);
145 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
146
147 if (!test_and_set_bit(counter, evntsel_nmi_owner))
148 return 1;
149 return 0;
150}
151
/* Drop ownership of a previously reserved event-select MSR. */
152void release_evntsel_nmi(unsigned int msr)
153{
154 unsigned int counter;
155
156 counter = nmi_evntsel_msr_to_bit(msr);
157 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
158
159 clear_bit(counter, evntsel_nmi_owner);
160}
161
/* Exported so oprofile and the NMI watchdog can share the counters. */
162EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
163EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
164EXPORT_SYMBOL(reserve_perfctr_nmi);
165EXPORT_SYMBOL(release_perfctr_nmi);
166EXPORT_SYMBOL(reserve_evntsel_nmi);
167EXPORT_SYMBOL(release_evntsel_nmi);
168
/*
 * Stop the LAPIC NMI watchdog on every CPU and release the reserved
 * counter/event-select MSRs.  No-op when the watchdog is not active.
 */
169void disable_lapic_nmi_watchdog(void)
170{
171 BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
172
173 if (atomic_read(&nmi_active) <= 0)
174 return;
175
176 on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
177 wd_ops->unreserve();
178
179 BUG_ON(atomic_read(&nmi_active) != 0);
180}
181
/*
 * (Re)start the LAPIC NMI watchdog on every CPU, first reserving the
 * perfctr resources.  Silently does nothing if no CPU-specific wd_ops
 * were probed or if the watchdog is already running.
 */
182void enable_lapic_nmi_watchdog(void)
183{
184 BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
185
186 /* are we already enabled */
187 if (atomic_read(&nmi_active) != 0)
188 return;
189
190 /* are we lapic aware */
191 if (!wd_ops)
192 return;
193 if (!wd_ops->reserve()) {
194 printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
195 return;
196 }
197
198 on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
199 touch_nmi_watchdog();
200}
201
202/*
203 * Activate the NMI watchdog via the local APIC.
204 */
205
206static unsigned int adjust_for_32bit_ctr(unsigned int hz)
207{
208 u64 counter_val;
209 unsigned int retval = hz;
210
211 /*
212 * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
213 * are writable, with higher bits sign extending from bit 31.
214 * So, we can only program the counter with 31 bit values and
215 * 32nd bit should be 1, for 33.. to be 1.
216 * Find the appropriate nmi_hz
217 */
218 counter_val = (u64)cpu_khz * 1000;
219 do_div(counter_val, retval);
220 if (counter_val > 0x7fffffffULL) {
221 u64 count = (u64)cpu_khz * 1000;
222 do_div(count, 0x7fffffffUL);
223 retval = count + 1;
224 }
225 return retval;
226}
227
/*
 * Program a full-width counter so it overflows (and fires the NMI)
 * after roughly cpu_khz*1000/nmi_hz cycles: the counter counts up
 * from -(cycles per tick).
 */
228static void
229write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi_hz)
230{
231 u64 count = (u64)cpu_khz * 1000;
232
233 do_div(count, nmi_hz);
234 if(descr)
235 Dprintk("setting %s to -0x%08Lx\n", descr, count);
236 wrmsrl(perfctr_msr, 0 - count);
237}
238
/* Same as above for counters where only the low 32 bits are writable. */
239static void write_watchdog_counter32(unsigned int perfctr_msr,
240 const char *descr, unsigned nmi_hz)
241{
242 u64 count = (u64)cpu_khz * 1000;
243
244 do_div(count, nmi_hz);
245 if(descr)
246 Dprintk("setting %s to -0x%08Lx\n", descr, count);
247 wrmsr(perfctr_msr, (u32)(-count), 0);
248}
249
250/* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface
251 nicely stable so there is not much variety */
252
253#define K7_EVNTSEL_ENABLE (1 << 22)
254#define K7_EVNTSEL_INT (1 << 20)
255#define K7_EVNTSEL_OS (1 << 17)
256#define K7_EVNTSEL_USR (1 << 16)
257#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
258#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
259
/*
 * Arm the K7-family watchdog on this CPU: select the "cycles processor
 * is running" event, load the counter, route the overflow to an NMI
 * via LVTPC, then set the enable bit.  Always returns 1 (success).
 */
260static int setup_k7_watchdog(unsigned nmi_hz)
261{
262 unsigned int perfctr_msr, evntsel_msr;
263 unsigned int evntsel;
264 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
265
266 perfctr_msr = wd_ops->perfctr;
267 evntsel_msr = wd_ops->evntsel;
268
269 wrmsrl(perfctr_msr, 0UL);
270
271 evntsel = K7_EVNTSEL_INT
272 | K7_EVNTSEL_OS
273 | K7_EVNTSEL_USR
274 | K7_NMI_EVENT;
275
 /* program everything before flipping the enable bit */
276 /* setup the timer */
277 wrmsr(evntsel_msr, evntsel, 0);
278 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz);
279 apic_write(APIC_LVTPC, APIC_DM_NMI);
280 evntsel |= K7_EVNTSEL_ENABLE;
281 wrmsr(evntsel_msr, evntsel, 0);
282
283 wd->perfctr_msr = perfctr_msr;
284 wd->evntsel_msr = evntsel_msr;
285 wd->cccr_msr = 0; //unused
286 return 1;
287}
288
/* Shared helpers for CPUs whose watchdog lives in one ctr/evntsel pair. */
289static void single_msr_stop_watchdog(void)
290{
291 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
292
293 wrmsr(wd->evntsel_msr, 0, 0);
294}
295
/* Reserve the counter, then the event select; unwind on failure. */
296static int single_msr_reserve(void)
297{
298 if (!reserve_perfctr_nmi(wd_ops->perfctr))
299 return 0;
300
301 if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
302 release_perfctr_nmi(wd_ops->perfctr);
303 return 0;
304 }
305 return 1;
306}
307
308static void single_msr_unreserve(void)
309{
310 release_evntsel_nmi(wd_ops->evntsel);
311 release_perfctr_nmi(wd_ops->perfctr);
312}
313
314static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
315{
316 /* start the cycle over again */
317 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
318}
319
/* AMD K7 and later: full-width counter, single enable bit. */
320static struct wd_ops k7_wd_ops = {
321 .reserve = single_msr_reserve,
322 .unreserve = single_msr_unreserve,
323 .setup = setup_k7_watchdog,
324 .rearm = single_msr_rearm,
325 .stop = single_msr_stop_watchdog,
326 .perfctr = MSR_K7_PERFCTR0,
327 .evntsel = MSR_K7_EVNTSEL0,
328 .checkbit = 1ULL<<47,
329};
330
331/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */
332
333#define P6_EVNTSEL0_ENABLE (1 << 22)
334#define P6_EVNTSEL_INT (1 << 20)
335#define P6_EVNTSEL_OS (1 << 17)
336#define P6_EVNTSEL_USR (1 << 16)
337#define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79
338#define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED
339
/*
 * Arm the P6-family watchdog: like K7 but the counter only takes
 * 32-bit writes, so the NMI rate may be raised by
 * adjust_for_32bit_ctr().  Returns 0 when the perfctr MSR is not
 * writable (e.g. under KVM), 1 on success.
 */
340static int setup_p6_watchdog(unsigned nmi_hz)
341{
342 unsigned int perfctr_msr, evntsel_msr;
343 unsigned int evntsel;
344 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
345
346 perfctr_msr = wd_ops->perfctr;
347 evntsel_msr = wd_ops->evntsel;
348
349 /* KVM doesn't implement this MSR */
350 if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
351 return 0;
352
353 evntsel = P6_EVNTSEL_INT
354 | P6_EVNTSEL_OS
355 | P6_EVNTSEL_USR
356 | P6_NMI_EVENT;
357
358 /* setup the timer */
359 wrmsr(evntsel_msr, evntsel, 0);
360 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
361 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz);
362 apic_write(APIC_LVTPC, APIC_DM_NMI);
363 evntsel |= P6_EVNTSEL0_ENABLE;
364 wrmsr(evntsel_msr, evntsel, 0);
365
366 wd->perfctr_msr = perfctr_msr;
367 wd->evntsel_msr = evntsel_msr;
368 wd->cccr_msr = 0; //unused
369 return 1;
370}
371
372static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
373{
374 /* P6 based Pentium M need to re-unmask
375 * the apic vector but it doesn't hurt
376 * other P6 variant.
377 * ArchPerfom/Core Duo also needs this */
378 apic_write(APIC_LVTPC, APIC_DM_NMI);
379 /* P6/ARCH_PERFMON has 32 bit counter write */
380 write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz);
381}
382
/* PPro/P2/P3/Pentium M: 40-bit counter, 32-bit writes only. */
383static struct wd_ops p6_wd_ops = {
384 .reserve = single_msr_reserve,
385 .unreserve = single_msr_unreserve,
386 .setup = setup_p6_watchdog,
387 .rearm = p6_rearm,
388 .stop = single_msr_stop_watchdog,
389 .perfctr = MSR_P6_PERFCTR0,
390 .evntsel = MSR_P6_EVNTSEL0,
391 .checkbit = 1ULL<<39,
392};
393
394/* Intel P4 performance counters. By far the most complicated of all. */
395
396#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7)
397#define P4_ESCR_EVENT_SELECT(N) ((N)<<25)
398#define P4_ESCR_OS (1<<3)
399#define P4_ESCR_USR (1<<2)
400#define P4_CCCR_OVF_PMI0 (1<<26)
401#define P4_CCCR_OVF_PMI1 (1<<27)
402#define P4_CCCR_THRESHOLD(N) ((N)<<20)
403#define P4_CCCR_COMPLEMENT (1<<19)
404#define P4_CCCR_COMPARE (1<<18)
405#define P4_CCCR_REQUIRED (3<<16)
406#define P4_CCCR_ESCR_SELECT(N) ((N)<<13)
407#define P4_CCCR_ENABLE (1<<12)
408#define P4_CCCR_OVF (1<<31)
409
410/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
411 CRU_ESCR0 (with any non-null event selector) through a complemented
412 max threshold. [IA32-Vol3, Section 14.9.9] */
413
/*
 * Arm the P4 watchdog on this CPU.  Each hyperthread gets its own
 * IQ counter/CCCR pair (selected by APIC id bit 0) while sharing
 * CRU_ESCR0; the CCCR is configured as a complemented max-threshold
 * comparator so the counter behaves like a clock.  Returns 0 when
 * the BIOS reports performance monitoring unavailable, 1 on success.
 */
414static int setup_p4_watchdog(unsigned nmi_hz)
415{
416 unsigned int perfctr_msr, evntsel_msr, cccr_msr;
417 unsigned int evntsel, cccr_val;
418 unsigned int misc_enable, dummy;
419 unsigned int ht_num;
420 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
421
422 rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
423 if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
424 return 0;
425
426#ifdef CONFIG_SMP
427 /* detect which hyperthread we are on */
428 if (smp_num_siblings == 2) {
429 unsigned int ebx, apicid;
430
431 ebx = cpuid_ebx(1);
432 apicid = (ebx >> 24) & 0xff;
433 ht_num = apicid & 1;
434 } else
435#endif
436 ht_num = 0;
437
438 /* performance counters are shared resources
439 * assign each hyperthread its own set
440 * (re-use the ESCR0 register, seems safe
441 * and keeps the cccr_val the same)
442 */
443 if (!ht_num) {
444 /* logical cpu 0 */
445 perfctr_msr = MSR_P4_IQ_PERFCTR0;
446 evntsel_msr = MSR_P4_CRU_ESCR0;
447 cccr_msr = MSR_P4_IQ_CCCR0;
448 cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
449 } else {
450 /* logical cpu 1 */
451 perfctr_msr = MSR_P4_IQ_PERFCTR1;
452 evntsel_msr = MSR_P4_CRU_ESCR0;
453 cccr_msr = MSR_P4_IQ_CCCR1;
454 cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
455 }
456
457 evntsel = P4_ESCR_EVENT_SELECT(0x3F)
458 | P4_ESCR_OS
459 | P4_ESCR_USR;
460
461 cccr_val |= P4_CCCR_THRESHOLD(15)
462 | P4_CCCR_COMPLEMENT
463 | P4_CCCR_COMPARE
464 | P4_CCCR_REQUIRED;
465
 /* program ESCR/CCCR and the counter before flipping ENABLE */
466 wrmsr(evntsel_msr, evntsel, 0);
467 wrmsr(cccr_msr, cccr_val, 0);
468 write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
469 apic_write(APIC_LVTPC, APIC_DM_NMI);
470 cccr_val |= P4_CCCR_ENABLE;
471 wrmsr(cccr_msr, cccr_val, 0);
472 wd->perfctr_msr = perfctr_msr;
473 wd->evntsel_msr = evntsel_msr;
474 wd->cccr_msr = cccr_msr;
475 return 1;
476}
477
/* Quiesce this CPU's P4 watchdog by clearing its CCCR and ESCR. */
478static void stop_p4_watchdog(void)
479{
480 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
481 wrmsr(wd->cccr_msr, 0, 0);
482 wrmsr(wd->evntsel_msr, 0, 0);
483}
484
/* Reserve both HT siblings' counters plus the shared ESCR,
 * unwinding with goto-cleanup on any failure. */
485static int p4_reserve(void)
486{
487 if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
488 return 0;
489#ifdef CONFIG_SMP
490 if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
491 goto fail1;
492#endif
493 if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
494 goto fail2;
495 /* RED-PEN why is ESCR1 not reserved here? */
496 return 1;
497 fail2:
498#ifdef CONFIG_SMP
499 if (smp_num_siblings > 1)
500 release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
501 fail1:
502#endif
503 release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
504 return 0;
505}
506
507static void p4_unreserve(void)
508{
509#ifdef CONFIG_SMP
510 if (smp_num_siblings > 1)
511 release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
512#endif
513 release_evntsel_nmi(MSR_P4_CRU_ESCR0);
514 release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
515}
516
517static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
518{
 /* NOTE(review): 'dummy' is 32-bit but rdmsrl/wrmsrl move 64 bits.
  * Only the low word (holding P4_CCCR_OVF, bit 31) matters here, yet
  * a u64 would be cleaner — confirm before changing. */
519 unsigned dummy;
520 /*
521 * P4 quirks:
522 * - An overflown perfctr will assert its interrupt
523 * until the OVF flag in its CCCR is cleared.
524 * - LVTPC is masked on interrupt and must be
525 * unmasked by the LVTPC handler.
526 */
527 rdmsrl(wd->cccr_msr, dummy);
528 dummy &= ~P4_CCCR_OVF;
529 wrmsrl(wd->cccr_msr, dummy);
530 apic_write(APIC_LVTPC, APIC_DM_NMI);
531 /* start the cycle over again */
532 write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
533}
534
/* Pentium 4: three MSRs per counter (perfctr, ESCR, CCCR). */
535static struct wd_ops p4_wd_ops = {
536 .reserve = p4_reserve,
537 .unreserve = p4_unreserve,
538 .setup = setup_p4_watchdog,
539 .rearm = p4_rearm,
540 .stop = stop_p4_watchdog,
541 /* RED-PEN this is wrong for the other sibling */
542 .perfctr = MSR_P4_BPU_PERFCTR0,
543 .evntsel = MSR_P4_BSU_ESCR0,
544 .checkbit = 1ULL<<39,
545};
546
547/* Watchdog using the Intel architected PerfMon. Used for Core2 and hopefully
548 all future Intel CPUs. */
549
550#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
551#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
552
/*
 * Arm the architected-PerfMon watchdog: verify via CPUID leaf 10 that
 * the Unhalted Core Cycles event exists, then program it like P6
 * (32-bit counter writes).  Also derives wd_ops->checkbit from the
 * CPUID-reported counter width.  Returns 0 when the event is absent.
 */
553static int setup_intel_arch_watchdog(unsigned nmi_hz)
554{
555 unsigned int ebx;
556 union cpuid10_eax eax;
557 unsigned int unused;
558 unsigned int perfctr_msr, evntsel_msr;
559 unsigned int evntsel;
560 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
561
562 /*
563 * Check whether the Architectural PerfMon supports
564 * Unhalted Core Cycles Event or not.
565 * NOTE: Corresponding bit = 0 in ebx indicates event present.
566 */
567 cpuid(10, &(eax.full), &ebx, &unused, &unused);
568 if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
569 (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
570 return 0;
571
572 perfctr_msr = wd_ops->perfctr;
573 evntsel_msr = wd_ops->evntsel;
574
575 wrmsrl(perfctr_msr, 0UL);
576
577 evntsel = ARCH_PERFMON_EVENTSEL_INT
578 | ARCH_PERFMON_EVENTSEL_OS
579 | ARCH_PERFMON_EVENTSEL_USR
580 | ARCH_PERFMON_NMI_EVENT_SEL
581 | ARCH_PERFMON_NMI_EVENT_UMASK;
582
583 /* setup the timer */
584 wrmsr(evntsel_msr, evntsel, 0);
585 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
586 write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
587 apic_write(APIC_LVTPC, APIC_DM_NMI);
588 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
589 wrmsr(evntsel_msr, evntsel, 0);
590
591 wd->perfctr_msr = perfctr_msr;
592 wd->evntsel_msr = evntsel_msr;
593 wd->cccr_msr = 0; //unused
594 wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1);
595 return 1;
596}
597
/* Core2+: counter/evntsel pair 1, leaving pair 0 free for oprofile. */
598static struct wd_ops intel_arch_wd_ops = {
599 .reserve = single_msr_reserve,
600 .unreserve = single_msr_unreserve,
601 .setup = setup_intel_arch_watchdog,
602 .rearm = p6_rearm,
603 .stop = single_msr_stop_watchdog,
604 .perfctr = MSR_ARCH_PERFMON_PERFCTR1,
605 .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
606};
607
/* Core Duo (Yonah): errata AE49 — perfctr1 enable bit is broken, use pair 0. */
608static struct wd_ops coreduo_wd_ops = {
609 .reserve = single_msr_reserve,
610 .unreserve = single_msr_unreserve,
611 .setup = setup_intel_arch_watchdog,
612 .rearm = p6_rearm,
613 .stop = single_msr_stop_watchdog,
614 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
615 .evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
616};
617
/*
 * Select the wd_ops implementation matching the boot CPU.  Leaves
 * wd_ops NULL (watchdog unavailable) for unsupported vendors,
 * families, or models.
 */
618static void probe_nmi_watchdog(void)
619{
620 switch (boot_cpu_data.x86_vendor) {
621 case X86_VENDOR_AMD:
 /* K7 (6), K8 (15) and Family 10h (16) share the interface */
622 if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
623 boot_cpu_data.x86 != 16)
624 return;
625 wd_ops = &k7_wd_ops;
626 break;
627 case X86_VENDOR_INTEL:
628 /* Work around Core Duo (Yonah) errata AE49 where perfctr1
629 doesn't have a working enable bit. */
630 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
631 wd_ops = &coreduo_wd_ops;
632 break;
633 }
634 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
635 wd_ops = &intel_arch_wd_ops;
636 break;
637 }
638 switch (boot_cpu_data.x86) {
639 case 6:
 /* models above 0xd are ARCH_PERFMON, handled above */
640 if (boot_cpu_data.x86_model > 0xd)
641 return;
642
643 wd_ops = &p6_wd_ops;
644 break;
645 case 15:
646 if (boot_cpu_data.x86_model > 0x4)
647 return;
648
649 wd_ops = &p4_wd_ops;
650 break;
651 default:
652 return;
653 }
654 break;
655 }
656}
657
658/* Interface to nmi.c */
659
/*
 * Probe (once) and arm the watchdog on the calling CPU.
 * Returns 0 on success, -1 when no implementation exists, the
 * perfctrs cannot be reserved, or per-CPU setup fails.
 */
660int lapic_watchdog_init(unsigned nmi_hz)
661{
662 if (!wd_ops) {
663 probe_nmi_watchdog();
664 if (!wd_ops)
665 return -1;
666
667 if (!wd_ops->reserve()) {
668 printk(KERN_ERR
669 "NMI watchdog: cannot reserve perfctrs\n");
670 return -1;
671 }
672 }
673
674 if (!(wd_ops->setup(nmi_hz))) {
675 printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
676 raw_smp_processor_id());
677 return -1;
678 }
679
680 return 0;
681}
682
/* Quiesce the watchdog on the calling CPU (no-op when never probed). */
683void lapic_watchdog_stop(void)
684{
685 if (wd_ops)
686 wd_ops->stop();
687}
688
/* Clamp the NMI rate for CPUs whose counters take only 32-bit writes. */
689unsigned lapic_adjust_nmi_hz(unsigned hz)
690{
691 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
692 if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
693 wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
694 hz = adjust_for_32bit_ctr(hz);
695 return hz;
696}
697
/* Called from the NMI handler: returns 1 when the counter overflowed
 * (watchdog tick, counter re-armed), 0 when it is still counting. */
698int lapic_wd_event(unsigned nmi_hz)
699{
700 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
701 u64 ctr;
702 rdmsrl(wd->perfctr_msr, ctr);
703 if (ctr & wd_ops->checkbit) { /* perfctr still running? */
704 return 0;
705 }
706 wd_ops->rearm(wd, nmi_hz);
707 return 1;
708}
709
/* True when probe_nmi_watchdog() found a usable implementation. */
710int lapic_watchdog_ok(void)
711{
712 return wd_ops != NULL;
713}
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
new file mode 100644
index 000000000000..1e31b6caffb1
--- /dev/null
+++ b/arch/x86/kernel/cpu/proc.c
@@ -0,0 +1,192 @@
1#include <linux/smp.h>
2#include <linux/timex.h>
3#include <linux/string.h>
4#include <asm/semaphore.h>
5#include <linux/seq_file.h>
6#include <linux/cpufreq.h>
7
8/*
9 * Get CPU information for use by the procfs.
10 */
/*
 * seq_file .show callback for /proc/cpuinfo: emit one record for the
 * cpuinfo_x86 entry passed in v, skipping CPUs that are offline.
 */
11static int show_cpuinfo(struct seq_file *m, void *v)
12{
13 /*
14 * These flag bits must match the definitions in <asm/cpufeature.h>.
15 * NULL means this bit is undefined or reserved; either way it doesn't
16 * have meaning as far as Linux is concerned. Note that it's important
17 * to realize there is a difference between this table and CPUID -- if
18 * applications want to get the raw CPUID data, they should access
19 * /dev/cpu/<cpu_nr>/cpuid instead.
20 */
21 static const char * const x86_cap_flags[] = {
22 /* Intel-defined */
23 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
24 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
25 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
26 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
27
28 /* AMD-defined */
29 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
30 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
31 NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL,
32 NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
33 "3dnowext", "3dnow",
34
35 /* Transmeta-defined */
36 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
37 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
38 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
39 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
40
41 /* Other (Linux-defined) */
42 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
43 NULL, NULL, NULL, NULL,
44 "constant_tsc", "up", NULL, "arch_perfmon",
45 "pebs", "bts", NULL, "sync_rdtsc",
46 "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
47 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
48
49 /* Intel-defined (#2) */
50 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
51 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
52 NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
53 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
54
55 /* VIA/Cyrix/Centaur-defined */
56 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
57 "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
58 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
59 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
60
61 /* AMD-defined (#2) */
62 "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
63 "altmovcr8", "abm", "sse4a",
64 "misalignsse", "3dnowprefetch",
65 "osvw", "ibs", NULL, NULL, NULL, NULL,
66 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
67 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
68
69 /* Auxiliary (Linux-defined) */
70 "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
71 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
72 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
73 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
74 };
75 static const char * const x86_power_flags[] = {
76 "ts", /* temperature sensor */
77 "fid", /* frequency id control */
78 "vid", /* voltage id control */
79 "ttp", /* thermal trip */
80 "tm",
81 "stc",
82 "100mhzsteps",
83 "hwpstate",
84 "", /* constant_tsc - moved to flags */
85 /* nothing */
86 };
87 struct cpuinfo_x86 *c = v;
 /* CPU number is recovered from the entry's offset into cpu_data[] */
88 int i, n = c - cpu_data;
89 int fpu_exception;
90
91#ifdef CONFIG_SMP
92 if (!cpu_online(n))
93 return 0;
94#endif
95 seq_printf(m, "processor\t: %d\n"
96 "vendor_id\t: %s\n"
97 "cpu family\t: %d\n"
98 "model\t\t: %d\n"
99 "model name\t: %s\n",
100 n,
101 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
102 c->x86,
103 c->x86_model,
104 c->x86_model_id[0] ? c->x86_model_id : "unknown");
105
106 if (c->x86_mask || c->cpuid_level >= 0)
107 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
108 else
109 seq_printf(m, "stepping\t: unknown\n");
110
111 if ( cpu_has(c, X86_FEATURE_TSC) ) {
 /* prefer the cpufreq governor's view; fall back to boot-time cpu_khz */
112 unsigned int freq = cpufreq_quick_get(n);
113 if (!freq)
114 freq = cpu_khz;
115 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
116 freq / 1000, (freq % 1000));
117 }
118
119 /* Cache size */
120 if (c->x86_cache_size >= 0)
121 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
122#ifdef CONFIG_X86_HT
123 if (c->x86_max_cores * smp_num_siblings > 1) {
124 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
125 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
126 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
127 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
128 }
129#endif
130
131 /* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */
132 fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
133 seq_printf(m, "fdiv_bug\t: %s\n"
134 "hlt_bug\t\t: %s\n"
135 "f00f_bug\t: %s\n"
136 "coma_bug\t: %s\n"
137 "fpu\t\t: %s\n"
138 "fpu_exception\t: %s\n"
139 "cpuid level\t: %d\n"
140 "wp\t\t: %s\n"
141 "flags\t\t:",
142 c->fdiv_bug ? "yes" : "no",
143 c->hlt_works_ok ? "no" : "yes",
144 c->f00f_bug ? "yes" : "no",
145 c->coma_bug ? "yes" : "no",
146 c->hard_math ? "yes" : "no",
147 fpu_exception ? "yes" : "no",
148 c->cpuid_level,
149 c->wp_works_ok ? "yes" : "no");
150
151 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
152 if ( test_bit(i, c->x86_capability) &&
153 x86_cap_flags[i] != NULL )
154 seq_printf(m, " %s", x86_cap_flags[i]);
155
 /* power-management bits: named if known, raw index otherwise */
156 for (i = 0; i < 32; i++)
157 if (c->x86_power & (1 << i)) {
158 if (i < ARRAY_SIZE(x86_power_flags) &&
159 x86_power_flags[i])
160 seq_printf(m, "%s%s",
161 x86_power_flags[i][0]?" ":"",
162 x86_power_flags[i]);
163 else
164 seq_printf(m, " [%d]", i);
165 }
166
167 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
168 c->loops_per_jiffy/(500000/HZ),
169 (c->loops_per_jiffy/(5000/HZ)) % 100);
170 seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size);
171
172 return 0;
173}
174
/* seq_file iterator: one position per possible CPU slot in cpu_data[]. */
175static void *c_start(struct seq_file *m, loff_t *pos)
176{
177 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
178}
179static void *c_next(struct seq_file *m, void *v, loff_t *pos)
180{
181 ++*pos;
182 return c_start(m, pos);
183}
184static void c_stop(struct seq_file *m, void *v)
185{
186}
/* Hooked up by fs/proc to implement /proc/cpuinfo. */
187struct seq_operations cpuinfo_op = {
188 .start = c_start,
189 .next = c_next,
190 .stop = c_stop,
191 .show = show_cpuinfo,
192};
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
new file mode 100644
index 000000000000..200fb3f9ebfb
--- /dev/null
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -0,0 +1,116 @@
1#include <linux/kernel.h>
2#include <linux/mm.h>
3#include <linux/init.h>
4#include <asm/processor.h>
5#include <asm/msr.h>
6#include "cpu.h"
7
/*
 * Per-CPU init for Transmeta: print CMS/CPU revision info from the
 * 0x8086xxxx CPUID leaves, unhide the capability bits CMS masks by
 * default, and mark the constant TSC.  Only ever called on CPUs that
 * already identified as Transmeta.
 */
8static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
9{
10 unsigned int cap_mask, uk, max, dummy;
11 unsigned int cms_rev1, cms_rev2;
12 unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev;
13 char cpu_info[65];
14
15 get_model_name(c); /* Same as AMD/Cyrix */
16 display_cacheinfo(c);
17
18 /* Print CMS and CPU revision */
19 max = cpuid_eax(0x80860000);
20 cpu_rev = 0;
21 if ( max >= 0x80860001 ) {
22 cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
 /* 0x02000000 marks the newer encoding reported by leaf 2 below */
23 if (cpu_rev != 0x02000000) {
24 printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
25 (cpu_rev >> 24) & 0xff,
26 (cpu_rev >> 16) & 0xff,
27 (cpu_rev >> 8) & 0xff,
28 cpu_rev & 0xff,
29 cpu_freq);
30 }
31 }
32 if ( max >= 0x80860002 ) {
33 cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
34 if (cpu_rev == 0x02000000) {
35 printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
36 new_cpu_rev, cpu_freq);
37 }
38 printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
39 (cms_rev1 >> 24) & 0xff,
40 (cms_rev1 >> 16) & 0xff,
41 (cms_rev1 >> 8) & 0xff,
42 cms_rev1 & 0xff,
43 cms_rev2);
44 }
 /* leaves 3..6 spell a 64-byte marketing string, 16 bytes per leaf */
45 if ( max >= 0x80860006 ) {
46 cpuid(0x80860003,
47 (void *)&cpu_info[0],
48 (void *)&cpu_info[4],
49 (void *)&cpu_info[8],
50 (void *)&cpu_info[12]);
51 cpuid(0x80860004,
52 (void *)&cpu_info[16],
53 (void *)&cpu_info[20],
54 (void *)&cpu_info[24],
55 (void *)&cpu_info[28]);
56 cpuid(0x80860005,
57 (void *)&cpu_info[32],
58 (void *)&cpu_info[36],
59 (void *)&cpu_info[40],
60 (void *)&cpu_info[44]);
61 cpuid(0x80860006,
62 (void *)&cpu_info[48],
63 (void *)&cpu_info[52],
64 (void *)&cpu_info[56],
65 (void *)&cpu_info[60]);
66 cpu_info[64] = '\0';
67 printk(KERN_INFO "CPU: %s\n", cpu_info);
68 }
69
70 /* Unhide possibly hidden capability flags */
71 rdmsr(0x80860004, cap_mask, uk);
72 wrmsr(0x80860004, ~0, uk);
73 c->x86_capability[0] = cpuid_edx(0x00000001);
74 wrmsr(0x80860004, cap_mask, uk);
75
76 /* All Transmeta CPUs have a constant TSC */
77 set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
78
79 /* If we can run i686 user-space code, call us an i686 */
80#define USER686 ((1 << X86_FEATURE_TSC)|\
81 (1 << X86_FEATURE_CX8)|\
82 (1 << X86_FEATURE_CMOV))
83 if (c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686)
84 c->x86 = 6;
85
86#ifdef CONFIG_SYSCTL
87 /* randomize_va_space slows us down enormously;
88 it probably triggers retranslation of x86->native bytecode */
89 randomize_va_space = 0;
90#endif
91}
92
/* Read the Transmeta-defined feature flags if the vendor leaves exist. */
93static void __cpuinit transmeta_identify(struct cpuinfo_x86 * c)
94{
95 u32 xlvl;
96
97 /* Transmeta-defined flags: level 0x80860001 */
98 xlvl = cpuid_eax(0x80860000);
99 if ( (xlvl & 0xffff0000) == 0x80860000 ) {
100 if ( xlvl >= 0x80860001 )
101 c->x86_capability[2] = cpuid_edx(0x80860001);
102 }
103}
104
/* Vendor descriptor matched against the CPUID vendor string. */
105static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
106 .c_vendor = "Transmeta",
107 .c_ident = { "GenuineTMx86", "TransmetaCPU" },
108 .c_init = init_transmeta,
109 .c_identify = transmeta_identify,
110};
111
/* Hook this vendor's cpu_dev into the global dispatch table. */
112int __init transmeta_init_cpu(void)
113{
114 cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
115 return 0;
116}
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
new file mode 100644
index 000000000000..a7a4e75bdcd7
--- /dev/null
+++ b/arch/x86/kernel/cpu/umc.c
@@ -0,0 +1,26 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <asm/processor.h>
4#include "cpu.h"
5
6/* UMC chips appear to be only either 386 or 486, so no special init takes place.
7 */
8
/* UMC made only 386/486 clones; table provides names for the two
 * family-4 models, with no init or identify hooks needed. */
9static struct cpu_dev umc_cpu_dev __cpuinitdata = {
10 .c_vendor = "UMC",
11 .c_ident = { "UMC UMC UMC" },
12 .c_models = {
13 { .vendor = X86_VENDOR_UMC, .family = 4, .model_names =
14 {
15 [1] = "U5D",
16 [2] = "U5S",
17 }
18 },
19 },
20};
21
/* Hook this vendor's cpu_dev into the global dispatch table. */
22int __init umc_init_cpu(void)
23{
24 cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev;
25 return 0;
26}