Diffstat (limited to 'arch/x86/kernel/cpu/amd.c')
 arch/x86/kernel/cpu/amd.c | 547 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 359 insertions(+), 188 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index cae9cabc3031..32e73520adf7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1,13 +1,22 @@
 #include <linux/init.h>
 #include <linux/bitops.h>
 #include <linux/mm.h>
+
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 
+#ifdef CONFIG_X86_64
+# include <asm/numa_64.h>
+# include <asm/mmconfig.h>
+# include <asm/cacheflush.h>
+#endif
+
 #include <mach_apic.h>
+
 #include "cpu.h"
 
+#ifdef CONFIG_X86_32
 /*
  * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
  * misexecution of code under Linux. Owners of such processors should
@@ -24,21 +33,273 @@
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
 {
-        if (cpuid_eax(0x80000000) >= 0x80000007) {
-                c->x86_power = cpuid_edx(0x80000007);
-                if (c->x86_power & (1<<8))
-                        set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+/*
+ * General Systems BIOSen alias the cpu frequency registers
+ * of the Elan at 0x000df000. Unfortuantly, one of the Linux
+ * drivers subsequently pokes it, and changes the CPU speed.
+ * Workaround : Remove the unneeded alias.
+ */
+#define CBAR            (0xfffc) /* Configuration Base Address (32-bit) */
+#define CBAR_ENB        (0x80000000)
+#define CBAR_KEY        (0X000000CB)
+        if (c->x86_model == 9 || c->x86_model == 10) {
+                if (inl (CBAR) & CBAR_ENB)
+                        outl (0 | CBAR_KEY, CBAR);
         }
 }
 
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+
+static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 {
         u32 l, h;
         int mbytes = num_physpages >> (20-PAGE_SHIFT);
-        int r;
 
+        if (c->x86_model < 6) {
+                /* Based on AMD doc 20734R - June 2000 */
+                if (c->x86_model == 0) {
+                        clear_cpu_cap(c, X86_FEATURE_APIC);
+                        set_cpu_cap(c, X86_FEATURE_PGE);
+                }
+                return;
+        }
+
+        if (c->x86_model == 6 && c->x86_mask == 1) {
+                const int K6_BUG_LOOP = 1000000;
+                int n;
+                void (*f_vide)(void);
+                unsigned long d, d2;
+
+                printk(KERN_INFO "AMD K6 stepping B detected - ");
+
+                /*
+                 * It looks like AMD fixed the 2.6.2 bug and improved indirect
+                 * calls at the same time.
+                 */
+
+                n = K6_BUG_LOOP;
+                f_vide = vide;
+                rdtscl(d);
+                while (n--)
+                        f_vide();
+                rdtscl(d2);
+                d = d2-d;
+
+                if (d > 20*K6_BUG_LOOP)
+                        printk("system stability may be impaired when more than 32 MB are used.\n");
+                else
+                        printk("probably OK (after B9730xxxx).\n");
+                printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
+        }
+
+        /* K6 with old style WHCR */
+        if (c->x86_model < 8 ||
+           (c->x86_model == 8 && c->x86_mask < 8)) {
+                /* We can only write allocate on the low 508Mb */
+                if (mbytes > 508)
+                        mbytes = 508;
+
+                rdmsr(MSR_K6_WHCR, l, h);
+                if ((l&0x0000FFFF) == 0) {
+                        unsigned long flags;
+                        l = (1<<0)|((mbytes/4)<<1);
+                        local_irq_save(flags);
+                        wbinvd();
+                        wrmsr(MSR_K6_WHCR, l, h);
+                        local_irq_restore(flags);
+                        printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
+                                mbytes);
+                }
+                return;
+        }
+
+        if ((c->x86_model == 8 && c->x86_mask > 7) ||
+             c->x86_model == 9 || c->x86_model == 13) {
+                /* The more serious chips .. */
+
+                if (mbytes > 4092)
+                        mbytes = 4092;
+
+                rdmsr(MSR_K6_WHCR, l, h);
+                if ((l&0xFFFF0000) == 0) {
+                        unsigned long flags;
+                        l = ((mbytes>>2)<<22)|(1<<16);
+                        local_irq_save(flags);
+                        wbinvd();
+                        wrmsr(MSR_K6_WHCR, l, h);
+                        local_irq_restore(flags);
+                        printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
+                                mbytes);
+                }
+
+                return;
+        }
+
+        if (c->x86_model == 10) {
+                /* AMD Geode LX is model 10 */
+                /* placeholder for any needed mods */
+                return;
+        }
+}
+
+static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
+{
+        u32 l, h;
+
+        /*
+         * Bit 15 of Athlon specific MSR 15, needs to be 0
+         * to enable SSE on Palomino/Morgan/Barton CPU's.
+         * If the BIOS didn't enable it already, enable it here.
+         */
+        if (c->x86_model >= 6 && c->x86_model <= 10) {
+                if (!cpu_has(c, X86_FEATURE_XMM)) {
+                        printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+                        rdmsr(MSR_K7_HWCR, l, h);
+                        l &= ~0x00008000;
+                        wrmsr(MSR_K7_HWCR, l, h);
+                        set_cpu_cap(c, X86_FEATURE_XMM);
+                }
+        }
+
+        /*
+         * It's been determined by AMD that Athlons since model 8 stepping 1
+         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
+         * As per AMD technical note 27212 0.2
+         */
+        if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
+                rdmsr(MSR_K7_CLK_CTL, l, h);
+                if ((l & 0xfff00000) != 0x20000000) {
+                        printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
+                                ((l & 0x000fffff)|0x20000000));
+                        wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
+                }
+        }
+
+        set_cpu_cap(c, X86_FEATURE_K7);
+}
+#endif
+
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+static int __cpuinit nearby_node(int apicid)
+{
+        int i, node;
+
+        for (i = apicid - 1; i >= 0; i--) {
+                node = apicid_to_node[i];
+                if (node != NUMA_NO_NODE && node_online(node))
+                        return node;
+        }
+        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
+                node = apicid_to_node[i];
+                if (node != NUMA_NO_NODE && node_online(node))
+                        return node;
+        }
+        return first_node(node_online_map); /* Shouldn't happen */
+}
+#endif
+
+/*
+ * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
+ * Assumes number of cores is a power of two.
+ */
+static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_HT
+        unsigned bits;
+
+        bits = c->x86_coreid_bits;
+
+        /* Low order bits define the core id (index of core in socket) */
+        c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+        /* Convert the initial APIC ID into the socket ID */
+        c->phys_proc_id = c->initial_apicid >> bits;
+#endif
+}
+
+static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+{
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+        int cpu = smp_processor_id();
+        int node;
+        unsigned apicid = hard_smp_processor_id();
+
+        node = c->phys_proc_id;
+        if (apicid_to_node[apicid] != NUMA_NO_NODE)
+                node = apicid_to_node[apicid];
+        if (!node_online(node)) {
+                /* Two possibilities here:
+                   - The CPU is missing memory and no node was created.
+                     In that case try picking one from a nearby CPU
+                   - The APIC IDs differ from the HyperTransport node IDs
+                     which the K8 northbridge parsing fills in.
+                     Assume they are all increased by a constant offset,
+                     but in the same order as the HT nodeids.
+                     If that doesn't result in a usable node fall back to the
+                     path for the previous case. */
+
+                int ht_nodeid = c->initial_apicid;
+
+                if (ht_nodeid >= 0 &&
+                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+                        node = apicid_to_node[ht_nodeid];
+                /* Pick a nearby node */
+                if (!node_online(node))
+                        node = nearby_node(apicid);
+        }
+        numa_set_node(cpu, node);
+
+        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+#endif
+}
+
+static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_HT
+        unsigned bits, ecx;
+
+        /* Multi core CPU? */
+        if (c->extended_cpuid_level < 0x80000008)
+                return;
+
+        ecx = cpuid_ecx(0x80000008);
+
+        c->x86_max_cores = (ecx & 0xff) + 1;
+
+        /* CPU telling us the core id bits shift? */
+        bits = (ecx >> 12) & 0xF;
+
+        /* Otherwise recompute */
+        if (bits == 0) {
+                while ((1 << bits) < c->x86_max_cores)
+                        bits++;
+        }
+
+        c->x86_coreid_bits = bits;
+#endif
+}
+
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+{
+        early_init_amd_mc(c);
+
+        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+        if (c->x86_power & (1<<8))
+                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
+#ifdef CONFIG_X86_64
+        set_cpu_cap(c, X86_FEATURE_SYSCALL32);
+#else
+        /* Set MTRR capability flag if appropriate */
+        if (c->x86 == 5)
+                if (c->x86_model == 13 || c->x86_model == 9 ||
+                    (c->x86_model == 8 && c->x86_mask >= 8))
+                        set_cpu_cap(c, X86_FEATURE_K6_MTRR);
+#endif
+}
+
+static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+{
 #ifdef CONFIG_SMP
         unsigned long long value;
 
@@ -49,7 +310,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 * Errata 63 for SH-B3 steppings
 	 * Errata 122 for all steppings (F+ have it disabled by default)
 	 */
-	if (c->x86 == 15) {
+	if (c->x86 == 0xf) {
 		rdmsrl(MSR_K7_HWCR, value);
 		value |= 1 << 6;
 		wrmsrl(MSR_K7_HWCR, value);
@@ -59,213 +320,119 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
         early_init_amd(c);
 
         /*
-         * FIXME: We should handle the K5 here. Set up the write
-         * range and also turn on MSR 83 bits 4 and 31 (write alloc,
-         * no bus pipeline)
-         */
-
-        /*
          * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
          * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
          */
         clear_cpu_cap(c, 0*32+31);
 
-        r = get_model_name(c);
-
-        switch (c->x86) {
-        case 4:
-                /*
-                 * General Systems BIOSen alias the cpu frequency registers
-                 * of the Elan at 0x000df000. Unfortuantly, one of the Linux
-                 * drivers subsequently pokes it, and changes the CPU speed.
-                 * Workaround : Remove the unneeded alias.
-                 */
-#define CBAR            (0xfffc) /* Configuration Base Address (32-bit) */
-#define CBAR_ENB        (0x80000000)
-#define CBAR_KEY        (0X000000CB)
-                if (c->x86_model == 9 || c->x86_model == 10) {
-                        if (inl (CBAR) & CBAR_ENB)
-                                outl (0 | CBAR_KEY, CBAR);
-                }
-                break;
-        case 5:
-                if (c->x86_model < 6) {
-                        /* Based on AMD doc 20734R - June 2000 */
-                        if (c->x86_model == 0) {
-                                clear_cpu_cap(c, X86_FEATURE_APIC);
-                                set_cpu_cap(c, X86_FEATURE_PGE);
-                        }
-                        break;
-                }
-
-                if (c->x86_model == 6 && c->x86_mask == 1) {
-                        const int K6_BUG_LOOP = 1000000;
-                        int n;
-                        void (*f_vide)(void);
-                        unsigned long d, d2;
-
-                        printk(KERN_INFO "AMD K6 stepping B detected - ");
-
-                        /*
-                         * It looks like AMD fixed the 2.6.2 bug and improved indirect
-                         * calls at the same time.
-                         */
-
-                        n = K6_BUG_LOOP;
-                        f_vide = vide;
-                        rdtscl(d);
-                        while (n--)
-                                f_vide();
-                        rdtscl(d2);
-                        d = d2-d;
-
-                        if (d > 20*K6_BUG_LOOP)
-                                printk("system stability may be impaired when more than 32 MB are used.\n");
-                        else
-                                printk("probably OK (after B9730xxxx).\n");
-                        printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
-                }
-
-                /* K6 with old style WHCR */
-                if (c->x86_model < 8 ||
-                   (c->x86_model == 8 && c->x86_mask < 8)) {
-                        /* We can only write allocate on the low 508Mb */
-                        if (mbytes > 508)
-                                mbytes = 508;
-
-                        rdmsr(MSR_K6_WHCR, l, h);
-                        if ((l&0x0000FFFF) == 0) {
-                                unsigned long flags;
-                                l = (1<<0)|((mbytes/4)<<1);
-                                local_irq_save(flags);
-                                wbinvd();
-                                wrmsr(MSR_K6_WHCR, l, h);
-                                local_irq_restore(flags);
-                                printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
-                                        mbytes);
-                        }
-                        break;
-                }
-
-                if ((c->x86_model == 8 && c->x86_mask > 7) ||
-                     c->x86_model == 9 || c->x86_model == 13) {
-                        /* The more serious chips .. */
-
-                        if (mbytes > 4092)
-                                mbytes = 4092;
-
-                        rdmsr(MSR_K6_WHCR, l, h);
-                        if ((l&0xFFFF0000) == 0) {
-                                unsigned long flags;
-                                l = ((mbytes>>2)<<22)|(1<<16);
-                                local_irq_save(flags);
-                                wbinvd();
-                                wrmsr(MSR_K6_WHCR, l, h);
-                                local_irq_restore(flags);
-                                printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
-                                        mbytes);
-                        }
-
-                        /* Set MTRR capability flag if appropriate */
-                        if (c->x86_model == 13 || c->x86_model == 9 ||
-                           (c->x86_model == 8 && c->x86_mask >= 8))
-                                set_cpu_cap(c, X86_FEATURE_K6_MTRR);
-                        break;
-                }
-
-                if (c->x86_model == 10) {
-                        /* AMD Geode LX is model 10 */
-                        /* placeholder for any needed mods */
-                        break;
-                }
-                break;
-        case 6: /* An Athlon/Duron */
-
-                /*
-                 * Bit 15 of Athlon specific MSR 15, needs to be 0
-                 * to enable SSE on Palomino/Morgan/Barton CPU's.
-                 * If the BIOS didn't enable it already, enable it here.
-                 */
-                if (c->x86_model >= 6 && c->x86_model <= 10) {
-                        if (!cpu_has(c, X86_FEATURE_XMM)) {
-                                printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
-                                rdmsr(MSR_K7_HWCR, l, h);
-                                l &= ~0x00008000;
-                                wrmsr(MSR_K7_HWCR, l, h);
-                                set_cpu_cap(c, X86_FEATURE_XMM);
-                        }
-                }
-
-                /*
-                 * It's been determined by AMD that Athlons since model 8 stepping 1
-                 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
-                 * As per AMD technical note 27212 0.2
-                 */
-                if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
-                        rdmsr(MSR_K7_CLK_CTL, l, h);
-                        if ((l & 0xfff00000) != 0x20000000) {
-                                printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
-                                        ((l & 0x000fffff)|0x20000000));
-                                wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
-                        }
-                }
-                break;
-        }
+#ifdef CONFIG_X86_64
+        /* On C+ stepping K8 rep microcode works well for copy/memset */
+        if (c->x86 == 0xf) {
+                u32 level;
+
+                level = cpuid_eax(1);
+                if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
+                        set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+        }
+        if (c->x86 == 0x10 || c->x86 == 0x11)
+                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+#else
+
+        /*
+         * FIXME: We should handle the K5 here. Set up the write
+         * range and also turn on MSR 83 bits 4 and 31 (write alloc,
+         * no bus pipeline)
+         */
 
         switch (c->x86) {
-        case 15:
-                /* Use K8 tuning for Fam10h and Fam11h */
-        case 0x10:
-        case 0x11:
-                set_cpu_cap(c, X86_FEATURE_K8);
+        case 4:
+                init_amd_k5(c);
                 break;
-        case 6:
-                set_cpu_cap(c, X86_FEATURE_K7);
+        case 5:
+                init_amd_k6(c);
+                break;
+        case 6: /* An Athlon/Duron */
+                init_amd_k7(c);
                 break;
         }
+
+        /* K6s reports MCEs but don't actually have all the MSRs */
+        if (c->x86 < 6)
+                clear_cpu_cap(c, X86_FEATURE_MCE);
+#endif
+
+        /* Enable workaround for FXSAVE leak */
         if (c->x86 >= 6)
                 set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
-        display_cacheinfo(c);
-
-        if (cpuid_eax(0x80000000) >= 0x80000008)
-                c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+        if (!c->x86_model_id[0]) {
+                switch (c->x86) {
+                case 0xf:
+                        /* Should distinguish Models here, but this is only
+                           a fallback anyways. */
+                        strcpy(c->x86_model_id, "Hammer");
+                        break;
+                }
+        }
 
-#ifdef CONFIG_X86_HT
-        /*
-         * On a AMD multi core setup the lower bits of the APIC id
-         * distinguish the cores.
-         */
-        if (c->x86_max_cores > 1) {
-                int cpu = smp_processor_id();
-                unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
+        display_cacheinfo(c);
 
-                if (bits == 0) {
-                        while ((1 << bits) < c->x86_max_cores)
-                                bits++;
-                }
-                c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
-                c->phys_proc_id >>= bits;
-                printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
-                       cpu, c->x86_max_cores, c->cpu_core_id);
+        /* Multi core CPU? */
+        if (c->extended_cpuid_level >= 0x80000008) {
+                amd_detect_cmp(c);
+                srat_detect_node(c);
         }
+
+#ifdef CONFIG_X86_32
+        detect_ht(c);
 #endif
 
-        if (cpuid_eax(0x80000000) >= 0x80000006) {
-                if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
+        if (c->extended_cpuid_level >= 0x80000006) {
+                if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
                         num_cache_leaves = 4;
                 else
                         num_cache_leaves = 3;
         }
 
-        /* K6s reports MCEs but don't actually have all the MSRs */
-        if (c->x86 < 6)
-                clear_cpu_cap(c, X86_FEATURE_MCE);
+        if (c->x86 >= 0xf && c->x86 <= 0x11)
+                set_cpu_cap(c, X86_FEATURE_K8);
 
-        if (cpu_has_xmm2)
+        if (cpu_has_xmm2) {
+                /* MFENCE stops RDTSC speculation */
                 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+        }
+
+#ifdef CONFIG_X86_64
+        if (c->x86 == 0x10) {
+                /* do this for boot cpu */
+                if (c == &boot_cpu_data)
+                        check_enable_amd_mmconf_dmi();
+
+                fam10h_check_enable_mmcfg();
+        }
+
+        if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
+                unsigned long long tseg;
+
+                /*
+                 * Split up direct mapping around the TSEG SMM area.
+                 * Don't do it for gbpages because there seems very little
+                 * benefit in doing so.
+                 */
+                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+                        printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+                        if ((tseg>>PMD_SHIFT) <
+                                (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
+                                ((tseg>>PMD_SHIFT) <
+                                (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
+                                (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
+                                set_memory_4k((unsigned long)__va(tseg), 1);
+                }
+        }
+#endif
 }
 
+#ifdef CONFIG_X86_32
 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
         /* AMD errata T13 (order #21922) */
@@ -278,10 +445,12 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
 	}
 	return size;
 }
+#endif
 
 static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
+#ifdef CONFIG_X86_32
 	.c_models = {
 		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
 		  {
@@ -294,9 +463,11 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 		  }
 		},
 	},
+	.c_size_cache	= amd_size_cache,
+#endif
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
-	.c_size_cache	= amd_size_cache,
+	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
+cpu_dev_register(amd_cpu_dev);
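
For reference, the core-topology math that amd_detect_cmp() introduces above splits the
initial APIC ID into a core index (the low x86_coreid_bits bits) and a socket ID (the
remaining high bits). A minimal standalone sketch of that bit-split, in plain C outside
the kernel; the APIC ID and bit-width values are illustrative assumptions, only the
field names mirror the patch:

#include <stdio.h>

int main(void)
{
        /* As in amd_detect_cmp(): low bits of the initial APIC ID index
         * the core within the socket, the high bits give the socket. */
        unsigned initial_apicid = 6;    /* hypothetical APIC ID */
        unsigned bits = 2;              /* x86_coreid_bits: 4 cores/socket */

        unsigned cpu_core_id  = initial_apicid & ((1 << bits) - 1);
        unsigned phys_proc_id = initial_apicid >> bits;

        /* APIC ID 6 (0b110) with 2 core-id bits -> core 2 on socket 1 */
        printf("core %u, socket %u\n", cpu_core_id, phys_proc_id);
        return 0;
}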