Diffstat (limited to 'arch/x86/kernel/cpu/intel.c')
-rw-r--r--	arch/x86/kernel/cpu/intel.c	365
1 file changed, 197 insertions, 168 deletions
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b75f2569b8f8..99468dbd08da 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -15,6 +15,11 @@
 #include <asm/ds.h>
 #include <asm/bugs.h>
 
+#ifdef CONFIG_X86_64
+#include <asm/topology.h>
+#include <asm/numa_64.h>
+#endif
+
 #include "cpu.h"
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -23,23 +28,22 @@
 #include <mach_apic.h>
 #endif
 
-#ifdef CONFIG_X86_INTEL_USERCOPY
-/*
- * Alignment at which movsl is preferred for bulk memory copies.
- */
-struct movsl_mask movsl_mask __read_mostly;
-#endif
-
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
-	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
-	if (c->x86 == 15 && c->x86_cache_alignment == 64)
-		c->x86_cache_alignment = 128;
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
+#else
+	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
+	if (c->x86 == 15 && c->x86_cache_alignment == 64)
+		c->x86_cache_alignment = 128;
+#endif
 }
 
+#ifdef CONFIG_X86_32
 /*
  * Early probe support logic for ppro memory erratum #50
  *
@@ -59,15 +63,54 @@ int __cpuinit ppro_with_ram_bug(void)
 	return 0;
 }
 
+#ifdef CONFIG_X86_F00F_BUG
+static void __cpuinit trap_init_f00f_bug(void)
+{
+	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
 
-/*
- * P4 Xeon errata 037 workaround.
- * Hardware prefetcher may cause stale data to be loaded into the cache.
- */
-static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
+	/*
+	 * Update the IDT descriptor and reload the IDT so that
+	 * it uses the read-only mapped virtual address.
+	 */
+	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
+	load_idt(&idt_descr);
+}
+#endif
+
+static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
+#ifdef CONFIG_X86_F00F_BUG
+	/*
+	 * All current models of Pentium and Pentium with MMX technology CPUs
+	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
+	 * Note that the workaround only should be initialized once...
+	 */
+	c->f00f_bug = 0;
+	if (!paravirt_enabled() && c->x86 == 5) {
+		static int f00f_workaround_enabled;
+
+		c->f00f_bug = 1;
+		if (!f00f_workaround_enabled) {
+			trap_init_f00f_bug();
+			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
+			f00f_workaround_enabled = 1;
+		}
+	}
+#endif
+
+	/*
+	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
+	 * model 3 mask 3
+	 */
+	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+		clear_cpu_cap(c, X86_FEATURE_SEP);
+
+	/*
+	 * P4 Xeon errata 037 workaround.
+	 * Hardware prefetcher may cause stale data to be loaded into the cache.
+	 */
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
 		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
 		if ((lo & (1<<9)) == 0) {
@@ -77,13 +120,68 @@ static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 			wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
 		}
 	}
+
+	/*
+	 * See if we have a good local APIC by checking for buggy Pentia,
+	 * i.e. all B steppings and the C2 stepping of P54C when using their
+	 * integrated APIC (see 11AP erratum in "Pentium Processor
+	 * Specification Update").
+	 */
+	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
+	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
+		set_cpu_cap(c, X86_FEATURE_11AP);
+
+
+#ifdef CONFIG_X86_INTEL_USERCOPY
+	/*
+	 * Set up the preferred alignment for movsl bulk memory moves
+	 */
+	switch (c->x86) {
+	case 4:		/* 486: untested */
+		break;
+	case 5:		/* Old Pentia: untested */
+		break;
+	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
+		movsl_mask.mask = 7;
+		break;
+	case 15:	/* P4 is OK down to 8-byte alignment */
+		movsl_mask.mask = 7;
+		break;
+	}
+#endif
+
+#ifdef CONFIG_X86_NUMAQ
+	numaq_tsc_disable();
+#endif
 }
+#else
+static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+{
+}
+#endif
 
+static void __cpuinit srat_detect_node(void)
+{
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+	unsigned node;
+	int cpu = smp_processor_id();
+	int apicid = hard_smp_processor_id();
+
+	/* Don't do the funky fallback heuristics the AMD version employs
+	   for now. */
+	node = apicid_to_node[apicid];
+	if (node == NUMA_NO_NODE || !node_online(node))
+		node = first_node(node_online_map);
+	numa_set_node(cpu, node);
+
+	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+#endif
+}
 
 /*
  * find out the number of processor cores on the die
  */
-static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
+static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx;
 
@@ -98,45 +196,51 @@ static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
 	return 1;
 }
 
-#ifdef CONFIG_X86_F00F_BUG
-static void __cpuinit trap_init_f00f_bug(void)
+static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
 {
-	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-
-	/*
-	 * Update the IDT descriptor and reload the IDT so that
-	 * it uses the read-only mapped virtual address.
-	 */
-	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-	load_idt(&idt_descr);
+	/* Intel VMX MSR indicated features */
+#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
+#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
+#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
+#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
+#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
+#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020
+
+	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
+
+	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
+	clear_cpu_cap(c, X86_FEATURE_VNMI);
+	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
+	clear_cpu_cap(c, X86_FEATURE_EPT);
+	clear_cpu_cap(c, X86_FEATURE_VPID);
+
+	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
+	msr_ctl = vmx_msr_high | vmx_msr_low;
+	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
+		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
+	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
+		set_cpu_cap(c, X86_FEATURE_VNMI);
+	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
+		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+		      vmx_msr_low, vmx_msr_high);
+		msr_ctl2 = vmx_msr_high | vmx_msr_low;
+		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
+		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
+			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
+		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
+			set_cpu_cap(c, X86_FEATURE_EPT);
+		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
+			set_cpu_cap(c, X86_FEATURE_VPID);
+	}
 }
-#endif
 
 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
-	char *p = NULL;
 
 	early_init_intel(c);
 
-#ifdef CONFIG_X86_F00F_BUG
-	/*
-	 * All current models of Pentium and Pentium with MMX technology CPUs
-	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
-	 * Note that the workaround only should be initialized once...
-	 */
-	c->f00f_bug = 0;
-	if (!paravirt_enabled() && c->x86 == 5) {
-		static int f00f_workaround_enabled;
-
-		c->f00f_bug = 1;
-		if (!f00f_workaround_enabled) {
-			trap_init_f00f_bug();
-			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
-			f00f_workaround_enabled = 1;
-		}
-	}
-#endif
+	intel_workarounds(c);
 
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
@@ -146,16 +250,32 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
-	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
-	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
-		clear_cpu_cap(c, X86_FEATURE_SEP);
+	if (cpu_has_xmm2)
+		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+	if (cpu_has_ds) {
+		unsigned int l1;
+		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+		if (!(l1 & (1<<11)))
+			set_cpu_cap(c, X86_FEATURE_BTS);
+		if (!(l1 & (1<<12)))
+			set_cpu_cap(c, X86_FEATURE_PEBS);
+		ds_init_intel(c);
+	}
 
+#ifdef CONFIG_X86_64
+	if (c->x86 == 15)
+		c->x86_cache_alignment = c->x86_clflush_size * 2;
+	if (c->x86 == 6)
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+#else
 	/*
 	 * Names for the Pentium II/Celeron processors
 	 * detectable only by also checking the cache size.
 	 * Dixon is NOT a Celeron.
 	 */
 	if (c->x86 == 6) {
+		char *p = NULL;
+
 		switch (c->x86_model) {
 		case 5:
 			if (c->x86_mask == 0) {
@@ -178,70 +298,41 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 				p = "Celeron (Coppermine)";
 			break;
 		}
-	}
-
-	if (p)
-		strcpy(c->x86_model_id, p);
-
-	c->x86_max_cores = num_cpu_cores(c);
-
-	detect_ht(c);
 
-	/* Work around errata */
-	Intel_errata_workarounds(c);
-
-#ifdef CONFIG_X86_INTEL_USERCOPY
-	/*
-	 * Set up the preferred alignment for movsl bulk memory moves
-	 */
-	switch (c->x86) {
-	case 4:		/* 486: untested */
-		break;
-	case 5:		/* Old Pentia: untested */
-		break;
-	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
-		movsl_mask.mask = 7;
-		break;
-	case 15:	/* P4 is OK down to 8-byte alignment */
-		movsl_mask.mask = 7;
-		break;
+		if (p)
+			strcpy(c->x86_model_id, p);
 	}
-#endif
 
-	if (cpu_has_xmm2)
-		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-	if (c->x86 == 15) {
+	if (c->x86 == 15)
 		set_cpu_cap(c, X86_FEATURE_P4);
-	}
 	if (c->x86 == 6)
 		set_cpu_cap(c, X86_FEATURE_P3);
-	if (cpu_has_ds) {
-		unsigned int l1;
-		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
-		if (!(l1 & (1<<11)))
-			set_cpu_cap(c, X86_FEATURE_BTS);
-		if (!(l1 & (1<<12)))
-			set_cpu_cap(c, X86_FEATURE_PEBS);
-	}
 
 	if (cpu_has_bts)
-		ds_init_intel(c);
+		ptrace_bts_init_intel(c);
 
-	/*
-	 * See if we have a good local APIC by checking for buggy Pentia,
-	 * i.e. all B steppings and the C2 stepping of P54C when using their
-	 * integrated APIC (see 11AP erratum in "Pentium Processor
-	 * Specification Update").
-	 */
-	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
-	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
-		set_cpu_cap(c, X86_FEATURE_11AP);
+#endif
 
-#ifdef CONFIG_X86_NUMAQ
-	numaq_tsc_disable();
+	detect_extended_topology(c);
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
 #endif
+	}
+
+	/* Work around errata */
+	srat_detect_node();
+
+	if (cpu_has(c, X86_FEATURE_VMX))
+		detect_vmx_virtcap(c);
 }
 
+#ifdef CONFIG_X86_32
 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/*
@@ -254,10 +345,12 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 		size = 256;
 	return size;
 }
+#endif
 
 static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
+#ifdef CONFIG_X86_32
 	.c_models = {
 		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
 		  {
@@ -307,76 +400,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 		}
 	},
 	},
+	.c_size_cache	= intel_size_cache,
+#endif
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
-	.c_size_cache	= intel_size_cache,
+	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
-
-#ifndef CONFIG_X86_CMPXCHG
-unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
-{
-	u8 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u8 *)ptr;
-	if (prev == old)
-		*(u8 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u8);
-
-unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
-{
-	u16 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u16 *)ptr;
-	if (prev == old)
-		*(u16 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u16);
-
-unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
-{
-	u32 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u32 *)ptr;
-	if (prev == old)
-		*(u32 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u32);
-#endif
-
-#ifndef CONFIG_X86_CMPXCHG64
-unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
-{
-	u64 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u64 *)ptr;
-	if (prev == old)
-		*(u64 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_486_u64);
-#endif
-
-/* arch_initcall(intel_cpu_init); */
-
+cpu_dev_register(intel_cpu_dev);
 
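The most substantial new code in this commit is detect_vmx_virtcap(), which derives the TPR-shadow, VNMI, FlexPriority, EPT, and VPID feature flags from the VMX capability MSRs. The same decoding can be reproduced from user space. Below is a minimal C sketch, not part of the commit: it assumes the Linux "msr" driver is loaded (modprobe msr, so /dev/cpu/0/msr exists) and root privileges, and uses MSR indices 0x482 and 0x48b, which the Intel SDM assigns to IA32_VMX_PROCBASED_CTLS and IA32_VMX_PROCBASED_CTLS2; the bit masks are copied from the hunk above.

/*
 * Sketch only: decode the same VMX capability bits as the kernel's
 * detect_vmx_virtcap(), but from user space via the Linux msr driver.
 * Assumes /dev/cpu/0/msr exists and the program runs as root.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* MSR indices per the Intel SDM (assumed, see lead-in) */
#define MSR_IA32_VMX_PROCBASED_CTLS	0x482
#define MSR_IA32_VMX_PROCBASED_CTLS2	0x48b

/* Bit masks copied from the diff above */
#define PROC_CTLS_TPR_SHADOW	0x00200000
#define PROC_CTLS_VNMI		0x00400000
#define PROC_CTLS_2ND_CTLS	0x80000000
#define PROC_CTLS2_VIRT_APIC	0x00000001
#define PROC_CTLS2_EPT		0x00000002
#define PROC_CTLS2_VPID		0x00000020

/* The msr driver maps an MSR read onto pread() at offset == MSR index. */
static int rdmsr_fd(int fd, uint32_t msr, uint32_t *lo, uint32_t *hi)
{
	uint64_t val;

	if (pread(fd, &val, sizeof(val), msr) != (ssize_t)sizeof(val))
		return -1;
	*lo = (uint32_t)val;
	*hi = (uint32_t)(val >> 32);
	return 0;
}

int main(void)
{
	uint32_t lo, hi, msr_ctl, msr_ctl2;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* Reading 0x482 faults on non-VMX CPUs; pread then fails with EIO. */
	if (fd < 0 || rdmsr_fd(fd, MSR_IA32_VMX_PROCBASED_CTLS, &lo, &hi)) {
		perror("rdmsr");
		return 1;
	}
	/* Same trick as the kernel code: OR both halves before testing. */
	msr_ctl = hi | lo;
	printf("tpr_shadow:   %d\n", !!(msr_ctl & PROC_CTLS_TPR_SHADOW));
	printf("vnmi:         %d\n", !!(msr_ctl & PROC_CTLS_VNMI));
	if ((msr_ctl & PROC_CTLS_2ND_CTLS) &&
	    !rdmsr_fd(fd, MSR_IA32_VMX_PROCBASED_CTLS2, &lo, &hi)) {
		msr_ctl2 = hi | lo;
		printf("flexpriority: %d\n",
		       !!(msr_ctl2 & PROC_CTLS2_VIRT_APIC) &&
		       !!(msr_ctl & PROC_CTLS_TPR_SHADOW));
		printf("ept:          %d\n", !!(msr_ctl2 & PROC_CTLS2_EPT));
		printf("vpid:         %d\n", !!(msr_ctl2 & PROC_CTLS2_VPID));
	}
	close(fd);
	return 0;
}

The OR of the two halves works because the high dword of each VMX control MSR advertises which control bits may be set to 1, so any control the CPU can enable shows up after OR-ing, which is exactly what the kernel function tests.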