Diffstat (limited to 'arch/i386/kernel/cpu')
 arch/i386/kernel/cpu/amd.c                        |  8
 arch/i386/kernel/cpu/centaur.c                    | 12
 arch/i386/kernel/cpu/common.c                     | 11
 arch/i386/kernel/cpu/cpufreq/Kconfig              |  1
 arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c       | 71
 arch/i386/kernel/cpu/cpufreq/p4-clockmod.c        |  9
 arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c | 12
 arch/i386/kernel/cpu/cyrix.c                      | 18
 arch/i386/kernel/cpu/intel_cacheinfo.c            | 12
 arch/i386/kernel/cpu/mtrr/main.c                  | 15
 arch/i386/kernel/cpu/nexgen.c                     |  8
 arch/i386/kernel/cpu/rise.c                       |  8
 arch/i386/kernel/cpu/transmeta.c                  | 11
 arch/i386/kernel/cpu/umc.c                        |  8
 14 files changed, 105 insertions(+), 99 deletions(-)
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 333578a4e91a..0810f81f2a05 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -282,3 +282,11 @@ int __init amd_init_cpu(void)
 }
 
 //early_arch_initcall(amd_init_cpu);
+
+static int __init amd_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_AMD] = NULL;
+	return 0;
+}
+
+late_initcall(amd_exit_cpu);
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index 394814e57672..f52669ecb93f 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -405,10 +405,6 @@ static void __init init_centaur(struct cpuinfo_x86 *c)
 			winchip2_protect_mcr();
 #endif
 			break;
-		case 10:
-			name="4";
-			/* no info on the WC4 yet */
-			break;
 		default:
 			name="??";
 		}
@@ -474,3 +470,11 @@ int __init centaur_init_cpu(void)
 }
 
 //early_arch_initcall(centaur_init_cpu);
+
+static int __init centaur_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_CENTAUR] = NULL;
+	return 0;
+}
+
+late_initcall(centaur_exit_cpu);
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 15aee26ec2b6..7eb9213734a3 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -44,6 +44,7 @@ static void default_init(struct cpuinfo_x86 * c)
 
 static struct cpu_dev default_cpu = {
 	.c_init	= default_init,
+	.c_vendor = "Unknown",
 };
 static struct cpu_dev * this_cpu = &default_cpu;
 
@@ -150,6 +151,7 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
 	char *v = c->x86_vendor_id;
 	int i;
+	static int printed;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (cpu_devs[i]) {
@@ -159,10 +161,17 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 				c->x86_vendor = i;
 				if (!early)
 					this_cpu = cpu_devs[i];
-				break;
+				return;
 			}
 		}
 	}
+	if (!printed) {
+		printed++;
+		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+		printk(KERN_ERR "CPU: Your system may be unstable.\n");
+	}
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	this_cpu = &default_cpu;
 }
 
 
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 0f1eb507233b..26892d2099b0 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
 
 config X86_GX_SUSPMOD
 	tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+	depends on PCI
 	help
 	  This add the CPUFreq driver for NatSemi Geode processors which
 	  support suspend modulation.
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 7975e79d5fa4..3852d0a4c1b5 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -295,68 +295,6 @@ acpi_cpufreq_guess_freq (
 }
 
 
-/*
- * acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
- * of this driver
- * @perf: processor-specific acpi_io_data struct
- * @cpu: CPU being initialized
- *
- * To avoid issues with legacy OSes, some BIOSes require to be informed of
- * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
- * accordingly, for Enhanced Speedstep. Actual call to _PDC is done in
- * driver/acpi/processor.c
- */
-static void
-acpi_processor_cpu_init_pdc_est(
-		struct acpi_processor_performance *perf,
-		unsigned int cpu,
-		struct acpi_object_list *obj_list
-		)
-{
-	union acpi_object *obj;
-	u32 *buf;
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-	dprintk("acpi_processor_cpu_init_pdc_est\n");
-
-	if (!cpu_has(c, X86_FEATURE_EST))
-		return;
-
-	/* Initialize pdc. It will be used later. */
-	if (!obj_list)
-		return;
-
-	if (!(obj_list->count && obj_list->pointer))
-		return;
-
-	obj = obj_list->pointer;
-	if ((obj->buffer.length == 12) && obj->buffer.pointer) {
-		buf = (u32 *)obj->buffer.pointer;
-		buf[0] = ACPI_PDC_REVISION_ID;
-		buf[1] = 1;
-		buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-		perf->pdc = obj_list;
-	}
-	return;
-}
-
-
-/* CPU specific PDC initialization */
-static void
-acpi_processor_cpu_init_pdc(
-		struct acpi_processor_performance *perf,
-		unsigned int cpu,
-		struct acpi_object_list *obj_list
-		)
-{
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-	dprintk("acpi_processor_cpu_init_pdc\n");
-	perf->pdc = NULL;
-	if (cpu_has(c, X86_FEATURE_EST))
-		acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
-	return;
-}
-
-
 static int
 acpi_cpufreq_cpu_init (
 	struct cpufreq_policy *policy)
@@ -367,14 +305,7 @@ acpi_cpufreq_cpu_init (
 	unsigned int result = 0;
 	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
 
-	union acpi_object arg0 = {ACPI_TYPE_BUFFER};
-	u32 arg0_buf[3];
-	struct acpi_object_list arg_list = {1, &arg0};
-
 	dprintk("acpi_cpufreq_cpu_init\n");
-	/* setup arg_list for _PDC settings */
-	arg0.buffer.length = 12;
-	arg0.buffer.pointer = (u8 *) arg0_buf;
 
 	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
 	if (!data)
@@ -382,9 +313,7 @@ acpi_cpufreq_cpu_init (
382 313
383 acpi_io_data[cpu] = data; 314 acpi_io_data[cpu] = data;
384 315
385 acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
386 result = acpi_processor_register_performance(&data->acpi_data, cpu); 316 result = acpi_processor_register_performance(&data->acpi_data, cpu);
387 data->acpi_data.pdc = NULL;
388 317
389 if (result) 318 if (result)
390 goto err_free; 319 goto err_free;
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 270f2188d68b..cc73a7ae34bc 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -52,6 +52,7 @@ enum {
 
 
 static int has_N44_O17_errata[NR_CPUS];
+static int has_N60_errata[NR_CPUS];
 static unsigned int stock_freq;
 static struct cpufreq_driver p4clockmod_driver;
 static unsigned int cpufreq_p4_get(unsigned int cpu);
@@ -226,6 +227,12 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	case 0x0f12:
 		has_N44_O17_errata[policy->cpu] = 1;
 		dprintk("has errata -- disabling low frequencies\n");
+		break;
+
+	case 0x0f29:
+		has_N60_errata[policy->cpu] = 1;
+		dprintk("has errata -- disabling frequencies lower than 2ghz\n");
+		break;
 	}
 
 	/* get max frequency */
@@ -237,6 +244,8 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
 		if ((i<2) && (has_N44_O17_errata[policy->cpu]))
 			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+		else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000)
+			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 		else
 			p4clockmod_table[i].frequency = (stock_freq * i)/8;
 	}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 9a826cde4fd1..c173c0fa117a 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -362,22 +362,10 @@ static struct acpi_processor_performance p;
  */
 static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 {
-	union acpi_object arg0 = {ACPI_TYPE_BUFFER};
-	u32 arg0_buf[3];
-	struct acpi_object_list arg_list = {1, &arg0};
 	unsigned long cur_freq;
 	int result = 0, i;
 	unsigned int cpu = policy->cpu;
 
-	/* _PDC settings */
-	arg0.buffer.length = 12;
-	arg0.buffer.pointer = (u8 *) arg0_buf;
-	arg0_buf[0] = ACPI_PDC_REVISION_ID;
-	arg0_buf[1] = 1;
-	arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;
-
-	p.pdc = &arg_list;
-
 	/* register with ACPI core */
 	if (acpi_processor_register_performance(&p, cpu)) {
 		dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index 75015975d038..00f2e058797c 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -345,7 +345,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
 /*
  * Handle National Semiconductor branded processors
  */
-static void __devinit init_nsc(struct cpuinfo_x86 *c)
+static void __init init_nsc(struct cpuinfo_x86 *c)
 {
 	/* There may be GX1 processors in the wild that are branded
 	 * NSC and not Cyrix.
@@ -444,6 +444,14 @@ int __init cyrix_init_cpu(void)
 
 //early_arch_initcall(cyrix_init_cpu);
 
+static int __init cyrix_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_CYRIX] = NULL;
+	return 0;
+}
+
+late_initcall(cyrix_exit_cpu);
+
 static struct cpu_dev nsc_cpu_dev __initdata = {
 	.c_vendor = "NSC",
 	.c_ident = { "Geode by NSC" },
@@ -458,3 +466,11 @@ int __init nsc_init_cpu(void)
 }
 
 //early_arch_initcall(nsc_init_cpu);
+
+static int __init nsc_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_NSC] = NULL;
+	return 0;
+}
+
+late_initcall(nsc_exit_cpu);
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index fbfd374aa336..ffe58cee0c48 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -43,13 +43,23 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
+	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
 	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
 	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
+	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
+	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x44, LVL_2,      1024 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x45, LVL_2,      2048 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x46, LVL_3,      4096 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x47, LVL_3,      8192 },	/* 8-way set assoc, 64 byte line size */
+	{ 0x49, LVL_3,      4096 },	/* 16-way set assoc, 64 byte line size */
+	{ 0x4a, LVL_3,      6144 },	/* 12-way set assoc, 64 byte line size */
+	{ 0x4b, LVL_3,      8192 },	/* 16-way set assoc, 64 byte line size */
+	{ 0x4c, LVL_3,      12288 },	/* 12-way set assoc, 64 byte line size */
+	{ 0x4d, LVL_3,      16384 },	/* 16-way set assoc, 64 byte line size */
 	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
@@ -57,6 +67,7 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
 	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
 	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
+	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
 	{ 0x78, LVL_2,      1024 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -141,6 +152,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
 	return 0;
 }
 
+/* will only be called once; __init is safe here */
 static int __init find_num_cache_leaves(void)
 {
 	unsigned int eax, ebx, ecx, edx;
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 1e9db198c440..3b4618bed70d 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -44,12 +44,10 @@
 #include <asm/msr.h>
 #include "mtrr.h"
 
-#define MTRR_VERSION "2.0 (20020519)"
-
 u32 num_var_ranges = 0;
 
 unsigned int *usage_table;
-static DECLARE_MUTEX(main_lock);
+static DECLARE_MUTEX(mtrr_sem);
 
 u32 size_or_mask, size_and_mask;
 
@@ -335,7 +333,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 	/* No CPU hotplug when we change MTRR entries */
 	lock_cpu_hotplug();
 	/* Search for existing MTRR */
-	down(&main_lock);
+	down(&mtrr_sem);
 	for (i = 0; i < num_var_ranges; ++i) {
 		mtrr_if->get(i, &lbase, &lsize, &ltype);
 		if (base >= lbase + lsize)
@@ -373,7 +371,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 		printk(KERN_INFO "mtrr: no more MTRRs available\n");
 	error = i;
  out:
-	up(&main_lock);
+	up(&mtrr_sem);
 	unlock_cpu_hotplug();
 	return error;
 }
@@ -466,7 +464,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 	max = num_var_ranges;
 	/* No CPU hotplug when we change MTRR entries */
 	lock_cpu_hotplug();
-	down(&main_lock);
+	down(&mtrr_sem);
 	if (reg < 0) {
 		/* Search for existing MTRR */
 		for (i = 0; i < max; ++i) {
@@ -505,7 +503,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 	set_mtrr(reg, 0, 0, 0);
 	error = reg;
  out:
-	up(&main_lock);
+	up(&mtrr_sem);
 	unlock_cpu_hotplug();
 	return error;
 }
@@ -671,7 +669,6 @@ void __init mtrr_bp_init(void)
 			break;
 		}
 	}
-	printk(KERN_INFO "mtrr: v%s\n",MTRR_VERSION);
 
 	if (mtrr_if) {
 		set_num_var_ranges();
@@ -688,7 +685,7 @@ void mtrr_ap_init(void)
 	if (!mtrr_if || !use_intel())
 		return;
 	/*
-	 * Ideally we should hold main_lock here to avoid mtrr entries changed,
+	 * Ideally we should hold mtrr_sem here to avoid mtrr entries changed,
 	 * but this routine will be called in cpu boot time, holding the lock
 	 * breaks it. This routine is called in two cases: 1.very earily time
 	 * of software resume, when there absolutely isn't mtrr entry changes;
diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c
index 30898a260a5c..ad87fa58058d 100644
--- a/arch/i386/kernel/cpu/nexgen.c
+++ b/arch/i386/kernel/cpu/nexgen.c
@@ -61,3 +61,11 @@ int __init nexgen_init_cpu(void)
 }
 
 //early_arch_initcall(nexgen_init_cpu);
+
+static int __init nexgen_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_NEXGEN] = NULL;
+	return 0;
+}
+
+late_initcall(nexgen_exit_cpu);
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
index 8602425628ca..d08d5a2811c8 100644
--- a/arch/i386/kernel/cpu/rise.c
+++ b/arch/i386/kernel/cpu/rise.c
@@ -51,3 +51,11 @@ int __init rise_init_cpu(void)
 }
 
 //early_arch_initcall(rise_init_cpu);
+
+static int __init rise_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_RISE] = NULL;
+	return 0;
+}
+
+late_initcall(rise_exit_cpu);
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index fc426380366b..7214c9b577ab 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -1,4 +1,5 @@
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/init.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -84,7 +85,7 @@ static void __init init_transmeta(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void transmeta_identify(struct cpuinfo_x86 * c)
+static void __init transmeta_identify(struct cpuinfo_x86 * c)
 {
 	u32 xlvl;
 	generic_identify(c);
@@ -111,3 +112,11 @@ int __init transmeta_init_cpu(void)
 }
 
 //early_arch_initcall(transmeta_init_cpu);
+
+static int __init transmeta_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_TRANSMETA] = NULL;
+	return 0;
+}
+
+late_initcall(transmeta_exit_cpu);
diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c
index 264fcad559d5..2cd988f6dc55 100644
--- a/arch/i386/kernel/cpu/umc.c
+++ b/arch/i386/kernel/cpu/umc.c
@@ -31,3 +31,11 @@ int __init umc_init_cpu(void)
 }
 
 //early_arch_initcall(umc_init_cpu);
+
+static int __init umc_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_UMC] = NULL;
+	return 0;
+}
+
+late_initcall(umc_exit_cpu);