author     Yinghai Lu <yhlu.kernel@gmail.com>    2008-09-07 20:58:56 -0400
committer  Ingo Molnar <mingo@elte.hu>           2008-09-08 09:32:06 -0400
commit     ff73152ced60871f7d5fb7dee52fa499902a3c6d (patch)
tree       19a236b407a16e37f86f3d3cb6bd631c86814854 /arch
parent     2a02505055fdd44958efd0e140dd87cb9fdecaf1 (diff)
x86: make 64-bit use amd.c
arch/x86/kernel/cpu/amd.c is now 100% identical to
arch/x86/kernel/cpu/amd_64.c, so build amd.c on 64-bit as well and
remove amd_64.c. Fix up the namespace impact: the CPU_SUP_AMD_32 and
CPU_SUP_AMD_64 config symbols are merged into a single CPU_SUP_AMD,
which simplifies the Kconfig glue as well.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
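For context, the unification works because the shared amd.c keeps its few 32-bit-only and 64-bit-only code paths behind preprocessor guards, as can be seen in the deleted amd_64.c below. The following is a minimal standalone sketch of that pattern, not kernel code: early_init_amd_example() is a hypothetical stand-in and printf replaces the real kernel helpers; CONFIG_X86_64 here is just a macro you define on the compiler command line, standing in for the Kconfig-generated one.

/*
 * Sketch of the single-source-file pattern: one file compiled for both
 * 32- and 64-bit builds, with bitness-specific work behind #ifdef.
 * Build with "gcc -DCONFIG_X86_64 sketch.c" to take the 64-bit branch,
 * or without the define for the 32-bit branch.
 */
#include <stdio.h>

static void early_init_amd_example(void)
{
#ifdef CONFIG_X86_64
	/* 64-bit-only setup (e.g. the SYSCALL32 feature bit in the real file) */
	printf("64-bit AMD early init path\n");
#else
	/* 32-bit-only setup (e.g. the K6 MTRR quirks in the real file) */
	printf("32-bit AMD early init path\n");
#endif
}

int main(void)
{
	early_init_amd_example();
	return 0;
}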
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/Kconfig.cpu         |  10
-rw-r--r--  arch/x86/kernel/cpu/Makefile |   3
-rw-r--r--  arch/x86/kernel/cpu/amd_64.c | 473
3 files changed, 2 insertions, 484 deletions
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 6156ac25ff8c..ab77d409fee0 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -444,17 +444,9 @@ config CPU_SUP_CYRIX_32
 	help
 	  This enables extended support for Cyrix processors
 
-config CPU_SUP_AMD_32
+config CPU_SUP_AMD
 	default y
 	bool "Support AMD processors" if PROCESSOR_SELECT
-	depends on !64BIT
-	help
-	  This enables extended support for AMD processors
-
-config CPU_SUP_AMD_64
-	default y
-	bool "Support AMD processors" if PROCESSOR_SELECT
-	depends on 64BIT
 	help
 	  This enables extended support for AMD processors
 
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index d031f248dfc0..510d1bcb058a 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -10,8 +10,7 @@ obj-$(CONFIG_X86_64) += bugs_64.o
 
 obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
 obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
-obj-$(CONFIG_CPU_SUP_AMD_32)		+= amd.o
-obj-$(CONFIG_CPU_SUP_AMD_64)		+= amd_64.o
+obj-$(CONFIG_CPU_SUP_AMD)		+= amd.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
 obj-$(CONFIG_CPU_SUP_CENTAUR_32)	+= centaur.o
 obj-$(CONFIG_CPU_SUP_CENTAUR_64)	+= centaur_64.o
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
deleted file mode 100644
index 32e73520adf7..000000000000
--- a/arch/x86/kernel/cpu/amd_64.c
+++ /dev/null
@@ -1,473 +0,0 @@
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/mm.h>
-
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/apic.h>
-
-#ifdef CONFIG_X86_64
-# include <asm/numa_64.h>
-# include <asm/mmconfig.h>
-# include <asm/cacheflush.h>
-#endif
-
-#include <mach_apic.h>
-
-#include "cpu.h"
-
-#ifdef CONFIG_X86_32
-/*
- * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
- * misexecution of code under Linux. Owners of such processors should
- * contact AMD for precise details and a CPU swap.
- *
- * See http://www.multimania.com/poulot/k6bug.html
- * http://www.amd.com/K6/k6docs/revgd.html
- *
- * The following test is erm.. interesting. AMD neglected to up
- * the chip setting when fixing the bug but they also tweaked some
- * performance at the same time..
- */
-
-extern void vide(void);
-__asm__(".align 4\nvide: ret");
-
-static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
-{
-/*
- * General Systems BIOSen alias the cpu frequency registers
- * of the Elan at 0x000df000. Unfortuantly, one of the Linux
- * drivers subsequently pokes it, and changes the CPU speed.
- * Workaround : Remove the unneeded alias.
- */
-#define CBAR	(0xfffc) /* Configuration Base Address (32-bit) */
-#define CBAR_ENB	(0x80000000)
-#define CBAR_KEY	(0X000000CB)
-	if (c->x86_model == 9 || c->x86_model == 10) {
-		if (inl (CBAR) & CBAR_ENB)
-			outl (0 | CBAR_KEY, CBAR);
-	}
-}
-
-
-static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
-{
-	u32 l, h;
-	int mbytes = num_physpages >> (20-PAGE_SHIFT);
-
-	if (c->x86_model < 6) {
-		/* Based on AMD doc 20734R - June 2000 */
-		if (c->x86_model == 0) {
-			clear_cpu_cap(c, X86_FEATURE_APIC);
-			set_cpu_cap(c, X86_FEATURE_PGE);
-		}
-		return;
-	}
-
-	if (c->x86_model == 6 && c->x86_mask == 1) {
-		const int K6_BUG_LOOP = 1000000;
-		int n;
-		void (*f_vide)(void);
-		unsigned long d, d2;
-
-		printk(KERN_INFO "AMD K6 stepping B detected - ");
-
-		/*
-		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
-		 * calls at the same time.
-		 */
-
-		n = K6_BUG_LOOP;
-		f_vide = vide;
-		rdtscl(d);
-		while (n--)
-			f_vide();
-		rdtscl(d2);
-		d = d2-d;
-
-		if (d > 20*K6_BUG_LOOP)
-			printk("system stability may be impaired when more than 32 MB are used.\n");
-		else
-			printk("probably OK (after B9730xxxx).\n");
-		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
-	}
-
-	/* K6 with old style WHCR */
-	if (c->x86_model < 8 ||
-	   (c->x86_model == 8 && c->x86_mask < 8)) {
-		/* We can only write allocate on the low 508Mb */
-		if (mbytes > 508)
-			mbytes = 508;
-
-		rdmsr(MSR_K6_WHCR, l, h);
-		if ((l&0x0000FFFF) == 0) {
-			unsigned long flags;
-			l = (1<<0)|((mbytes/4)<<1);
-			local_irq_save(flags);
-			wbinvd();
-			wrmsr(MSR_K6_WHCR, l, h);
-			local_irq_restore(flags);
-			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
-				mbytes);
-		}
-		return;
-	}
-
-	if ((c->x86_model == 8 && c->x86_mask > 7) ||
-	     c->x86_model == 9 || c->x86_model == 13) {
-		/* The more serious chips .. */
-
-		if (mbytes > 4092)
-			mbytes = 4092;
-
-		rdmsr(MSR_K6_WHCR, l, h);
-		if ((l&0xFFFF0000) == 0) {
-			unsigned long flags;
-			l = ((mbytes>>2)<<22)|(1<<16);
-			local_irq_save(flags);
-			wbinvd();
-			wrmsr(MSR_K6_WHCR, l, h);
-			local_irq_restore(flags);
-			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
-				mbytes);
-		}
-
-		return;
-	}
-
-	if (c->x86_model == 10) {
-		/* AMD Geode LX is model 10 */
-		/* placeholder for any needed mods */
-		return;
-	}
-}
-
-static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
-{
-	u32 l, h;
-
-	/*
-	 * Bit 15 of Athlon specific MSR 15, needs to be 0
-	 * to enable SSE on Palomino/Morgan/Barton CPU's.
-	 * If the BIOS didn't enable it already, enable it here.
-	 */
-	if (c->x86_model >= 6 && c->x86_model <= 10) {
-		if (!cpu_has(c, X86_FEATURE_XMM)) {
-			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
-			rdmsr(MSR_K7_HWCR, l, h);
-			l &= ~0x00008000;
-			wrmsr(MSR_K7_HWCR, l, h);
-			set_cpu_cap(c, X86_FEATURE_XMM);
-		}
-	}
-
-	/*
-	 * It's been determined by AMD that Athlons since model 8 stepping 1
-	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
-	 * As per AMD technical note 27212 0.2
-	 */
-	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
-		rdmsr(MSR_K7_CLK_CTL, l, h);
-		if ((l & 0xfff00000) != 0x20000000) {
-			printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
-				((l & 0x000fffff)|0x20000000));
-			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
-		}
-	}
-
-	set_cpu_cap(c, X86_FEATURE_K7);
-}
-#endif
-
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-static int __cpuinit nearby_node(int apicid)
-{
-	int i, node;
-
-	for (i = apicid - 1; i >= 0; i--) {
-		node = apicid_to_node[i];
-		if (node != NUMA_NO_NODE && node_online(node))
-			return node;
-	}
-	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-		node = apicid_to_node[i];
-		if (node != NUMA_NO_NODE && node_online(node))
-			return node;
-	}
-	return first_node(node_online_map); /* Shouldn't happen */
-}
-#endif
-
-/*
- * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
- * Assumes number of cores is a power of two.
- */
-static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_HT
-	unsigned bits;
-
-	bits = c->x86_coreid_bits;
-
-	/* Low order bits define the core id (index of core in socket) */
-	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
-	/* Convert the initial APIC ID into the socket ID */
-	c->phys_proc_id = c->initial_apicid >> bits;
-#endif
-}
-
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
-{
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-	int cpu = smp_processor_id();
-	int node;
-	unsigned apicid = hard_smp_processor_id();
-
-	node = c->phys_proc_id;
-	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-		node = apicid_to_node[apicid];
-	if (!node_online(node)) {
-		/* Two possibilities here:
-		   - The CPU is missing memory and no node was created.
-		     In that case try picking one from a nearby CPU
-		   - The APIC IDs differ from the HyperTransport node IDs
-		     which the K8 northbridge parsing fills in.
-		     Assume they are all increased by a constant offset,
-		     but in the same order as the HT nodeids.
-		     If that doesn't result in a usable node fall back to the
-		     path for the previous case. */
-
-		int ht_nodeid = c->initial_apicid;
-
-		if (ht_nodeid >= 0 &&
-		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-			node = apicid_to_node[ht_nodeid];
-		/* Pick a nearby node */
-		if (!node_online(node))
-			node = nearby_node(apicid);
-	}
-	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-#endif
-}
-
-static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_HT
-	unsigned bits, ecx;
-
-	/* Multi core CPU? */
-	if (c->extended_cpuid_level < 0x80000008)
-		return;
-
-	ecx = cpuid_ecx(0x80000008);
-
-	c->x86_max_cores = (ecx & 0xff) + 1;
-
-	/* CPU telling us the core id bits shift? */
-	bits = (ecx >> 12) & 0xF;
-
-	/* Otherwise recompute */
-	if (bits == 0) {
-		while ((1 << bits) < c->x86_max_cores)
-			bits++;
-	}
-
-	c->x86_coreid_bits = bits;
-#endif
-}
-
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
-{
-	early_init_amd_mc(c);
-
-	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-	if (c->x86_power & (1<<8))
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-
-#ifdef CONFIG_X86_64
-	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
-#else
-	/* Set MTRR capability flag if appropriate */
-	if (c->x86 == 5)
-		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
-			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
-#endif
-}
-
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-	unsigned long long value;
-
-	/*
-	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
-	 * bit 6 of msr C001_0015
-	 *
-	 * Errata 63 for SH-B3 steppings
-	 * Errata 122 for all steppings (F+ have it disabled by default)
-	 */
-	if (c->x86 == 0xf) {
-		rdmsrl(MSR_K7_HWCR, value);
-		value |= 1 << 6;
-		wrmsrl(MSR_K7_HWCR, value);
-	}
-#endif
-
-	early_init_amd(c);
-
-	/*
-	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
-	 */
-	clear_cpu_cap(c, 0*32+31);
-
-#ifdef CONFIG_X86_64
-	/* On C+ stepping K8 rep microcode works well for copy/memset */
-	if (c->x86 == 0xf) {
-		u32 level;
-
-		level = cpuid_eax(1);
-		if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
-			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-	}
-	if (c->x86 == 0x10 || c->x86 == 0x11)
-		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-#else
-
-	/*
-	 *	FIXME: We should handle the K5 here. Set up the write
-	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
-	 *	no bus pipeline)
-	 */
-
-	switch (c->x86) {
-	case 4:
-		init_amd_k5(c);
-		break;
-	case 5:
-		init_amd_k6(c);
-		break;
-	case 6: /* An Athlon/Duron */
-		init_amd_k7(c);
-		break;
-	}
-
-	/* K6s reports MCEs but don't actually have all the MSRs */
-	if (c->x86 < 6)
-		clear_cpu_cap(c, X86_FEATURE_MCE);
-#endif
-
-	/* Enable workaround for FXSAVE leak */
-	if (c->x86 >= 6)
-		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
-
-	if (!c->x86_model_id[0]) {
-		switch (c->x86) {
-		case 0xf:
-			/* Should distinguish Models here, but this is only
-			   a fallback anyways. */
-			strcpy(c->x86_model_id, "Hammer");
-			break;
-		}
-	}
-
-	display_cacheinfo(c);
-
-	/* Multi core CPU? */
-	if (c->extended_cpuid_level >= 0x80000008) {
-		amd_detect_cmp(c);
-		srat_detect_node(c);
-	}
-
-#ifdef CONFIG_X86_32
-	detect_ht(c);
-#endif
-
-	if (c->extended_cpuid_level >= 0x80000006) {
-		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
-			num_cache_leaves = 4;
-		else
-			num_cache_leaves = 3;
-	}
-
-	if (c->x86 >= 0xf && c->x86 <= 0x11)
-		set_cpu_cap(c, X86_FEATURE_K8);
-
-	if (cpu_has_xmm2) {
-		/* MFENCE stops RDTSC speculation */
-		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
-	}
-
-#ifdef CONFIG_X86_64
-	if (c->x86 == 0x10) {
-		/* do this for boot cpu */
-		if (c == &boot_cpu_data)
-			check_enable_amd_mmconf_dmi();
-
-		fam10h_check_enable_mmcfg();
-	}
-
-	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
-		unsigned long long tseg;
-
-		/*
-		 * Split up direct mapping around the TSEG SMM area.
-		 * Don't do it for gbpages because there seems very little
-		 * benefit in doing so.
-		 */
-		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
-			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
-			if ((tseg>>PMD_SHIFT) <
-				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
-				((tseg>>PMD_SHIFT) <
-				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
-				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
-				set_memory_4k((unsigned long)__va(tseg), 1);
-		}
-	}
-#endif
-}
-
-#ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
-{
-	/* AMD errata T13 (order #21922) */
-	if ((c->x86 == 6)) {
-		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
-			size = 64;
-		if (c->x86_model == 4 &&
-		    (c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
-			size = 256;
-	}
-	return size;
-}
-#endif
-
-static struct cpu_dev amd_cpu_dev __cpuinitdata = {
-	.c_vendor	= "AMD",
-	.c_ident	= { "AuthenticAMD" },
-#ifdef CONFIG_X86_32
-	.c_models = {
-		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
-		  {
-			[3] = "486 DX/2",
-			[7] = "486 DX/2-WB",
-			[8] = "486 DX/4",
-			[9] = "486 DX/4-WB",
-			[14] = "Am5x86-WT",
-			[15] = "Am5x86-WB"
-		  }
-		},
-	},
-	.c_size_cache	= amd_size_cache,
-#endif
-	.c_early_init	= early_init_amd,
-	.c_init		= init_amd,
-	.c_x86_vendor	= X86_VENDOR_AMD,
-};
-
-cpu_dev_register(amd_cpu_dev);