author | Len Brown <len.brown@intel.com> | 2006-01-07 03:50:18 -0500
---|---|---
committer | Len Brown <len.brown@intel.com> | 2006-01-07 03:50:18 -0500
commit | ed03f430cdc8c802652467e9097606fedc2c7abc (patch) |
tree | 30941ec1e6f93e99358fefe18175e5dd800a4379 /arch/i386/kernel |
parent | ed349a8a0a780ed27e2a765f16cee54d9b63bfee (diff) |
parent | 6f957eaf79356a32e838f5f262ee9a60544b1d5b (diff) |
Pull pnpacpi into acpica branch
Diffstat (limited to 'arch/i386/kernel')
29 files changed, 277 insertions, 179 deletions
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index 496a2c9909fe..d8f94e78de8a 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c | |||
@@ -721,7 +721,7 @@ static int __init apic_set_verbosity(char *str) | |||
721 | apic_verbosity = APIC_VERBOSE; | 721 | apic_verbosity = APIC_VERBOSE; |
722 | else | 722 | else |
723 | printk(KERN_WARNING "APIC Verbosity level %s not recognised" | 723 | printk(KERN_WARNING "APIC Verbosity level %s not recognised" |
724 | " use apic=verbose or apic=debug", str); | 724 | " use apic=verbose or apic=debug\n", str); |
725 | 725 | ||
726 | return 0; | 726 | return 0; |
727 | } | 727 | } |
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c index 1e60acbed3c1..2d793d4aef1a 100644 --- a/arch/i386/kernel/apm.c +++ b/arch/i386/kernel/apm.c | |||
@@ -303,17 +303,6 @@ extern int (*console_blank_hook)(int); | |||
303 | #include "apm.h" | 303 | #include "apm.h" |
304 | 304 | ||
305 | /* | 305 | /* |
306 | * Define to make all _set_limit calls use 64k limits. The APM 1.1 BIOS is | ||
307 | * supposed to provide limit information that it recognizes. Many machines | ||
308 | * do this correctly, but many others do not restrict themselves to their | ||
309 | * claimed limit. When this happens, they will cause a segmentation | ||
310 | * violation in the kernel at boot time. Most BIOS's, however, will | ||
311 | * respect a 64k limit, so we use that. If you want to be pedantic and | ||
312 | * hold your BIOS to its claims, then undefine this. | ||
313 | */ | ||
314 | #define APM_RELAX_SEGMENTS | ||
315 | |||
316 | /* | ||
317 | * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend. | 306 | * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend. |
318 | * This patched by Chad Miller <cmiller@surfsouth.com>, original code by | 307 | * This patched by Chad Miller <cmiller@surfsouth.com>, original code by |
319 | * David Chen <chen@ctpa04.mit.edu> | 308 | * David Chen <chen@ctpa04.mit.edu> |
@@ -1075,22 +1064,23 @@ static int apm_engage_power_management(u_short device, int enable) | |||
1075 | 1064 | ||
1076 | static int apm_console_blank(int blank) | 1065 | static int apm_console_blank(int blank) |
1077 | { | 1066 | { |
1078 | int error; | 1067 | int error, i; |
1079 | u_short state; | 1068 | u_short state; |
1069 | static const u_short dev[3] = { 0x100, 0x1FF, 0x101 }; | ||
1080 | 1070 | ||
1081 | state = blank ? APM_STATE_STANDBY : APM_STATE_READY; | 1071 | state = blank ? APM_STATE_STANDBY : APM_STATE_READY; |
1082 | /* Blank the first display device */ | 1072 | |
1083 | error = set_power_state(0x100, state); | 1073 | for (i = 0; i < ARRAY_SIZE(dev); i++) { |
1084 | if ((error != APM_SUCCESS) && (error != APM_NO_ERROR)) { | 1074 | error = set_power_state(dev[i], state); |
1085 | /* try to blank them all instead */ | 1075 | |
1086 | error = set_power_state(0x1ff, state); | 1076 | if ((error == APM_SUCCESS) || (error == APM_NO_ERROR)) |
1087 | if ((error != APM_SUCCESS) && (error != APM_NO_ERROR)) | 1077 | return 1; |
1088 | /* try to blank device one instead */ | 1078 | |
1089 | error = set_power_state(0x101, state); | 1079 | if (error == APM_NOT_ENGAGED) |
1080 | break; | ||
1090 | } | 1081 | } |
1091 | if ((error == APM_SUCCESS) || (error == APM_NO_ERROR)) | 1082 | |
1092 | return 1; | 1083 | if (error == APM_NOT_ENGAGED && state != APM_STATE_READY) { |
1093 | if (error == APM_NOT_ENGAGED) { | ||
1094 | static int tried; | 1084 | static int tried; |
1095 | int eng_error; | 1085 | int eng_error; |
1096 | if (tried++ == 0) { | 1086 | if (tried++ == 0) { |
@@ -2233,8 +2223,8 @@ static struct dmi_system_id __initdata apm_dmi_table[] = { | |||
2233 | static int __init apm_init(void) | 2223 | static int __init apm_init(void) |
2234 | { | 2224 | { |
2235 | struct proc_dir_entry *apm_proc; | 2225 | struct proc_dir_entry *apm_proc; |
2226 | struct desc_struct *gdt; | ||
2236 | int ret; | 2227 | int ret; |
2237 | int i; | ||
2238 | 2228 | ||
2239 | dmi_check_system(apm_dmi_table); | 2229 | dmi_check_system(apm_dmi_table); |
2240 | 2230 | ||
@@ -2312,45 +2302,30 @@ static int __init apm_init(void) | |||
2312 | set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); | 2302 | set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); |
2313 | _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); | 2303 | _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); |
2314 | 2304 | ||
2305 | /* | ||
2306 | * Set up the long jump entry point to the APM BIOS, which is called | ||
2307 | * from inline assembly. | ||
2308 | */ | ||
2315 | apm_bios_entry.offset = apm_info.bios.offset; | 2309 | apm_bios_entry.offset = apm_info.bios.offset; |
2316 | apm_bios_entry.segment = APM_CS; | 2310 | apm_bios_entry.segment = APM_CS; |
2317 | 2311 | ||
2318 | for (i = 0; i < NR_CPUS; i++) { | 2312 | /* |
2319 | struct desc_struct *gdt = get_cpu_gdt_table(i); | 2313 | * The APM 1.1 BIOS is supposed to provide limit information that it |
2320 | set_base(gdt[APM_CS >> 3], | 2314 | * recognizes. Many machines do this correctly, but many others do |
2321 | __va((unsigned long)apm_info.bios.cseg << 4)); | 2315 | * not restrict themselves to their claimed limit. When this happens, |
2322 | set_base(gdt[APM_CS_16 >> 3], | 2316 | * they will cause a segmentation violation in the kernel at boot time. |
2323 | __va((unsigned long)apm_info.bios.cseg_16 << 4)); | 2317 | * Most BIOS's, however, will respect a 64k limit, so we use that. |
2324 | set_base(gdt[APM_DS >> 3], | 2318 | * |
2325 | __va((unsigned long)apm_info.bios.dseg << 4)); | 2319 | * Note we only set APM segments on CPU zero, since we pin the APM |
2326 | #ifndef APM_RELAX_SEGMENTS | 2320 | * code to that CPU. |
2327 | if (apm_info.bios.version == 0x100) { | 2321 | */ |
2328 | #endif | 2322 | gdt = get_cpu_gdt_table(0); |
2329 | /* For ASUS motherboard, Award BIOS rev 110 (and others?) */ | 2323 | set_base(gdt[APM_CS >> 3], |
2330 | _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1); | 2324 | __va((unsigned long)apm_info.bios.cseg << 4)); |
2331 | /* For some unknown machine. */ | 2325 | set_base(gdt[APM_CS_16 >> 3], |
2332 | _set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1); | 2326 | __va((unsigned long)apm_info.bios.cseg_16 << 4)); |
2333 | /* For the DEC Hinote Ultra CT475 (and others?) */ | 2327 | set_base(gdt[APM_DS >> 3], |
2334 | _set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1); | 2328 | __va((unsigned long)apm_info.bios.dseg << 4)); |
2335 | #ifndef APM_RELAX_SEGMENTS | ||
2336 | } else { | ||
2337 | _set_limit((char *)&gdt[APM_CS >> 3], | ||
2338 | (apm_info.bios.cseg_len - 1) & 0xffff); | ||
2339 | _set_limit((char *)&gdt[APM_CS_16 >> 3], | ||
2340 | (apm_info.bios.cseg_16_len - 1) & 0xffff); | ||
2341 | _set_limit((char *)&gdt[APM_DS >> 3], | ||
2342 | (apm_info.bios.dseg_len - 1) & 0xffff); | ||
2343 | /* workaround for broken BIOSes */ | ||
2344 | if (apm_info.bios.cseg_len <= apm_info.bios.offset) | ||
2345 | _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1); | ||
2346 | if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */ | ||
2347 | /* for the BIOS that assumes granularity = 1 */ | ||
2348 | gdt[APM_DS >> 3].b |= 0x800000; | ||
2349 | printk(KERN_NOTICE "apm: we set the granularity of dseg.\n"); | ||
2350 | } | ||
2351 | } | ||
2352 | #endif | ||
2353 | } | ||
2354 | 2329 | ||
2355 | apm_proc = create_proc_info_entry("apm", 0, NULL, apm_get_info); | 2330 | apm_proc = create_proc_info_entry("apm", 0, NULL, apm_get_info); |
2356 | if (apm_proc) | 2331 | if (apm_proc) |
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index e344ef88cfcd..e7697e077f6b 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c | |||
@@ -161,8 +161,13 @@ static void __init init_amd(struct cpuinfo_x86 *c) | |||
161 | set_bit(X86_FEATURE_K6_MTRR, c->x86_capability); | 161 | set_bit(X86_FEATURE_K6_MTRR, c->x86_capability); |
162 | break; | 162 | break; |
163 | } | 163 | } |
164 | break; | ||
165 | 164 | ||
165 | if (c->x86_model == 10) { | ||
166 | /* AMD Geode LX is model 10 */ | ||
167 | /* placeholder for any needed mods */ | ||
168 | break; | ||
169 | } | ||
170 | break; | ||
166 | case 6: /* An Athlon/Duron */ | 171 | case 6: /* An Athlon/Duron */ |
167 | 172 | ||
168 | /* Bit 15 of Athlon specific MSR 15, needs to be 0 | 173 | /* Bit 15 of Athlon specific MSR 15, needs to be 0 |
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 31e344b26bae..cca655688ffc 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c | |||
@@ -18,9 +18,6 @@ | |||
18 | 18 | ||
19 | #include "cpu.h" | 19 | #include "cpu.h" |
20 | 20 | ||
21 | DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]); | ||
22 | EXPORT_PER_CPU_SYMBOL(cpu_gdt_table); | ||
23 | |||
24 | DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); | 21 | DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); |
25 | EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); | 22 | EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); |
26 | 23 | ||
@@ -599,11 +596,6 @@ void __devinit cpu_init(void) | |||
599 | load_idt(&idt_descr); | 596 | load_idt(&idt_descr); |
600 | 597 | ||
601 | /* | 598 | /* |
602 | * Delete NT | ||
603 | */ | ||
604 | __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl"); | ||
605 | |||
606 | /* | ||
607 | * Set up and load the per-CPU TSS and LDT | 599 | * Set up and load the per-CPU TSS and LDT |
608 | */ | 600 | */ |
609 | atomic_inc(&init_mm.mm_count); | 601 | atomic_inc(&init_mm.mm_count); |
diff --git a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c index 04a405345203..2b62dee35c6c 100644 --- a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c | |||
@@ -177,9 +177,10 @@ static unsigned int nforce2_fsb_read(int bootfsb) | |||
177 | */ | 177 | */ |
178 | static int nforce2_set_fsb(unsigned int fsb) | 178 | static int nforce2_set_fsb(unsigned int fsb) |
179 | { | 179 | { |
180 | u32 pll, temp = 0; | 180 | u32 temp = 0; |
181 | unsigned int tfsb; | 181 | unsigned int tfsb; |
182 | int diff; | 182 | int diff; |
183 | int pll = 0; | ||
183 | 184 | ||
184 | if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) { | 185 | if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) { |
185 | printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb); | 186 | printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb); |
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index 68a1fc87f4ca..0fbbd4c1072e 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -45,7 +45,7 @@ | |||
45 | 45 | ||
46 | #define PFX "powernow-k8: " | 46 | #define PFX "powernow-k8: " |
47 | #define BFX PFX "BIOS error: " | 47 | #define BFX PFX "BIOS error: " |
48 | #define VERSION "version 1.50.4" | 48 | #define VERSION "version 1.60.0" |
49 | #include "powernow-k8.h" | 49 | #include "powernow-k8.h" |
50 | 50 | ||
51 | /* serialize freq changes */ | 51 | /* serialize freq changes */ |
@@ -216,10 +216,10 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) | |||
216 | 216 | ||
217 | do { | 217 | do { |
218 | wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); | 218 | wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); |
219 | if (i++ > 100) { | 219 | if (i++ > 100) { |
220 | printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n"); | 220 | printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n"); |
221 | return 1; | 221 | return 1; |
222 | } | 222 | } |
223 | } while (query_current_values_with_pending_wait(data)); | 223 | } while (query_current_values_with_pending_wait(data)); |
224 | 224 | ||
225 | if (savefid != data->currfid) { | 225 | if (savefid != data->currfid) { |
@@ -336,7 +336,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid | |||
336 | /* Phase 2 - core frequency transition */ | 336 | /* Phase 2 - core frequency transition */ |
337 | static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) | 337 | static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) |
338 | { | 338 | { |
339 | u32 vcoreqfid, vcocurrfid, vcofiddiff, savevid = data->currvid; | 339 | u32 vcoreqfid, vcocurrfid, vcofiddiff, fid_interval, savevid = data->currvid; |
340 | 340 | ||
341 | if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) { | 341 | if ((reqfid < HI_FID_TABLE_BOTTOM) && (data->currfid < HI_FID_TABLE_BOTTOM)) { |
342 | printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n", | 342 | printk(KERN_ERR PFX "ph2: illegal lo-lo transition 0x%x 0x%x\n", |
@@ -359,9 +359,11 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) | |||
359 | : vcoreqfid - vcocurrfid; | 359 | : vcoreqfid - vcocurrfid; |
360 | 360 | ||
361 | while (vcofiddiff > 2) { | 361 | while (vcofiddiff > 2) { |
362 | (data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2); | ||
363 | |||
362 | if (reqfid > data->currfid) { | 364 | if (reqfid > data->currfid) { |
363 | if (data->currfid > LO_FID_TABLE_TOP) { | 365 | if (data->currfid > LO_FID_TABLE_TOP) { |
364 | if (write_new_fid(data, data->currfid + 2)) { | 366 | if (write_new_fid(data, data->currfid + fid_interval)) { |
365 | return 1; | 367 | return 1; |
366 | } | 368 | } |
367 | } else { | 369 | } else { |
@@ -371,7 +373,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) | |||
371 | } | 373 | } |
372 | } | 374 | } |
373 | } else { | 375 | } else { |
374 | if (write_new_fid(data, data->currfid - 2)) | 376 | if (write_new_fid(data, data->currfid - fid_interval)) |
375 | return 1; | 377 | return 1; |
376 | } | 378 | } |
377 | 379 | ||
@@ -464,7 +466,7 @@ static int check_supported_cpu(unsigned int cpu) | |||
464 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | 466 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); |
465 | 467 | ||
466 | if (smp_processor_id() != cpu) { | 468 | if (smp_processor_id() != cpu) { |
467 | printk(KERN_ERR "limiting to cpu %u failed\n", cpu); | 469 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); |
468 | goto out; | 470 | goto out; |
469 | } | 471 | } |
470 | 472 | ||
@@ -474,7 +476,7 @@ static int check_supported_cpu(unsigned int cpu) | |||
474 | eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); | 476 | eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); |
475 | if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || | 477 | if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || |
476 | ((eax & CPUID_XFAM) != CPUID_XFAM_K8) || | 478 | ((eax & CPUID_XFAM) != CPUID_XFAM_K8) || |
477 | ((eax & CPUID_XMOD) > CPUID_XMOD_REV_F)) { | 479 | ((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) { |
478 | printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax); | 480 | printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax); |
479 | goto out; | 481 | goto out; |
480 | } | 482 | } |
@@ -517,22 +519,24 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 | |||
517 | printk(KERN_ERR BFX "maxvid exceeded with pstate %d\n", j); | 519 | printk(KERN_ERR BFX "maxvid exceeded with pstate %d\n", j); |
518 | return -ENODEV; | 520 | return -ENODEV; |
519 | } | 521 | } |
520 | if ((pst[j].fid > MAX_FID) | 522 | if (pst[j].fid > MAX_FID) { |
521 | || (pst[j].fid & 1) | 523 | printk(KERN_ERR BFX "maxfid exceeded with pstate %d\n", j); |
522 | || (j && (pst[j].fid < HI_FID_TABLE_BOTTOM))) { | 524 | return -ENODEV; |
525 | } | ||
526 | if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) { | ||
523 | /* Only first fid is allowed to be in "low" range */ | 527 | /* Only first fid is allowed to be in "low" range */ |
524 | printk(KERN_ERR PFX "two low fids - %d : 0x%x\n", j, pst[j].fid); | 528 | printk(KERN_ERR BFX "two low fids - %d : 0x%x\n", j, pst[j].fid); |
525 | return -EINVAL; | 529 | return -EINVAL; |
526 | } | 530 | } |
527 | if (pst[j].fid < lastfid) | 531 | if (pst[j].fid < lastfid) |
528 | lastfid = pst[j].fid; | 532 | lastfid = pst[j].fid; |
529 | } | 533 | } |
530 | if (lastfid & 1) { | 534 | if (lastfid & 1) { |
531 | printk(KERN_ERR PFX "lastfid invalid\n"); | 535 | printk(KERN_ERR BFX "lastfid invalid\n"); |
532 | return -EINVAL; | 536 | return -EINVAL; |
533 | } | 537 | } |
534 | if (lastfid > LO_FID_TABLE_TOP) | 538 | if (lastfid > LO_FID_TABLE_TOP) |
535 | printk(KERN_INFO PFX "first fid not from lo freq table\n"); | 539 | printk(KERN_INFO BFX "first fid not from lo freq table\n"); |
536 | 540 | ||
537 | return 0; | 541 | return 0; |
538 | } | 542 | } |
@@ -631,7 +635,7 @@ static int find_psb_table(struct powernow_k8_data *data) | |||
631 | 635 | ||
632 | dprintk("table vers: 0x%x\n", psb->tableversion); | 636 | dprintk("table vers: 0x%x\n", psb->tableversion); |
633 | if (psb->tableversion != PSB_VERSION_1_4) { | 637 | if (psb->tableversion != PSB_VERSION_1_4) { |
634 | printk(KERN_INFO BFX "PSB table is not v1.4\n"); | 638 | printk(KERN_ERR BFX "PSB table is not v1.4\n"); |
635 | return -ENODEV; | 639 | return -ENODEV; |
636 | } | 640 | } |
637 | 641 | ||
@@ -689,7 +693,7 @@ static int find_psb_table(struct powernow_k8_data *data) | |||
689 | * BIOS and Kernel Developer's Guide, which is available on | 693 | * BIOS and Kernel Developer's Guide, which is available on |
690 | * www.amd.com | 694 | * www.amd.com |
691 | */ | 695 | */ |
692 | printk(KERN_INFO PFX "BIOS error - no PSB or ACPI _PSS objects\n"); | 696 | printk(KERN_ERR PFX "BIOS error - no PSB or ACPI _PSS objects\n"); |
693 | return -ENODEV; | 697 | return -ENODEV; |
694 | } | 698 | } |
695 | 699 | ||
@@ -912,7 +916,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi | |||
912 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); | 916 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); |
913 | 917 | ||
914 | if (smp_processor_id() != pol->cpu) { | 918 | if (smp_processor_id() != pol->cpu) { |
915 | printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu); | 919 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); |
916 | goto err_out; | 920 | goto err_out; |
917 | } | 921 | } |
918 | 922 | ||
@@ -982,6 +986,9 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
982 | cpumask_t oldmask = CPU_MASK_ALL; | 986 | cpumask_t oldmask = CPU_MASK_ALL; |
983 | int rc, i; | 987 | int rc, i; |
984 | 988 | ||
989 | if (!cpu_online(pol->cpu)) | ||
990 | return -ENODEV; | ||
991 | |||
985 | if (!check_supported_cpu(pol->cpu)) | 992 | if (!check_supported_cpu(pol->cpu)) |
986 | return -ENODEV; | 993 | return -ENODEV; |
987 | 994 | ||
@@ -1021,7 +1028,7 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1021 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); | 1028 | set_cpus_allowed(current, cpumask_of_cpu(pol->cpu)); |
1022 | 1029 | ||
1023 | if (smp_processor_id() != pol->cpu) { | 1030 | if (smp_processor_id() != pol->cpu) { |
1024 | printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu); | 1031 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); |
1025 | goto err_out; | 1032 | goto err_out; |
1026 | } | 1033 | } |
1027 | 1034 | ||
@@ -1162,10 +1169,9 @@ static void __exit powernowk8_exit(void) | |||
1162 | cpufreq_unregister_driver(&cpufreq_amd64_driver); | 1169 | cpufreq_unregister_driver(&cpufreq_amd64_driver); |
1163 | } | 1170 | } |
1164 | 1171 | ||
1165 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com."); | 1172 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); |
1166 | MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); | 1173 | MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); |
1167 | MODULE_LICENSE("GPL"); | 1174 | MODULE_LICENSE("GPL"); |
1168 | 1175 | ||
1169 | late_initcall(powernowk8_init); | 1176 | late_initcall(powernowk8_init); |
1170 | module_exit(powernowk8_exit); | 1177 | module_exit(powernowk8_exit); |
1171 | |||
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h index b1e85bb36396..d0de37d58e9a 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h | |||
@@ -42,7 +42,7 @@ struct powernow_k8_data { | |||
42 | #define CPUID_XFAM 0x0ff00000 /* extended family */ | 42 | #define CPUID_XFAM 0x0ff00000 /* extended family */ |
43 | #define CPUID_XFAM_K8 0 | 43 | #define CPUID_XFAM_K8 0 |
44 | #define CPUID_XMOD 0x000f0000 /* extended model */ | 44 | #define CPUID_XMOD 0x000f0000 /* extended model */ |
45 | #define CPUID_XMOD_REV_F 0x00040000 | 45 | #define CPUID_XMOD_REV_G 0x00060000 |
46 | #define CPUID_USE_XFAM_XMOD 0x00000f00 | 46 | #define CPUID_USE_XFAM_XMOD 0x00000f00 |
47 | #define CPUID_GET_MAX_CAPABILITIES 0x80000000 | 47 | #define CPUID_GET_MAX_CAPABILITIES 0x80000000 |
48 | #define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 | 48 | #define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 |
@@ -86,13 +86,14 @@ struct powernow_k8_data { | |||
86 | * low fid table | 86 | * low fid table |
87 | * - lowest entry in the high fid table must be a <= 200MHz + 2 * the entry | 87 | * - lowest entry in the high fid table must be a <= 200MHz + 2 * the entry |
88 | * in the low fid table | 88 | * in the low fid table |
89 | * - the parts can only step at 200 MHz intervals, so 1.9 GHz is never valid | 89 | * - the parts can only step at <= 200 MHz intervals, odd fid values are |
90 | * supported in revision G and later revisions. | ||
90 | * - lowest frequency must be >= interprocessor hypertransport link speed | 91 | * - lowest frequency must be >= interprocessor hypertransport link speed |
91 | * (only applies to MP systems obviously) | 92 | * (only applies to MP systems obviously) |
92 | */ | 93 | */ |
93 | 94 | ||
94 | /* fids (frequency identifiers) are arranged in 2 tables - lo and hi */ | 95 | /* fids (frequency identifiers) are arranged in 2 tables - lo and hi */ |
95 | #define LO_FID_TABLE_TOP 6 /* fid values marking the boundary */ | 96 | #define LO_FID_TABLE_TOP 7 /* fid values marking the boundary */ |
96 | #define HI_FID_TABLE_BOTTOM 8 /* between the low and high tables */ | 97 | #define HI_FID_TABLE_BOTTOM 8 /* between the low and high tables */ |
97 | 98 | ||
98 | #define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */ | 99 | #define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */ |
@@ -106,7 +107,7 @@ struct powernow_k8_data { | |||
106 | #define MIN_FREQ 800 /* Min and max freqs, per spec */ | 107 | #define MIN_FREQ 800 /* Min and max freqs, per spec */ |
107 | #define MAX_FREQ 5000 | 108 | #define MAX_FREQ 5000 |
108 | 109 | ||
109 | #define INVALID_FID_MASK 0xffffffc1 /* not a valid fid if these bits are set */ | 110 | #define INVALID_FID_MASK 0xffffffc0 /* not a valid fid if these bits are set */ |
110 | #define INVALID_VID_MASK 0xffffffc0 /* not a valid vid if these bits are set */ | 111 | #define INVALID_VID_MASK 0xffffffc0 /* not a valid vid if these bits are set */ |
111 | 112 | ||
112 | #define VID_OFF 0x3f | 113 | #define VID_OFF 0x3f |
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c b/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c index 5b7d18a06afa..b425cd3d1838 100644 --- a/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c | |||
@@ -40,6 +40,7 @@ static struct pci_dev *speedstep_chipset_dev; | |||
40 | */ | 40 | */ |
41 | static unsigned int speedstep_processor = 0; | 41 | static unsigned int speedstep_processor = 0; |
42 | 42 | ||
43 | static u32 pmbase; | ||
43 | 44 | ||
44 | /* | 45 | /* |
45 | * There are only two frequency states for each processor. Values | 46 | * There are only two frequency states for each processor. Values |
@@ -56,34 +57,47 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { | |||
56 | 57 | ||
57 | 58 | ||
58 | /** | 59 | /** |
59 | * speedstep_set_state - set the SpeedStep state | 60 | * speedstep_find_register - read the PMBASE address |
60 | * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) | ||
61 | * | 61 | * |
62 | * Tries to change the SpeedStep state. | 62 | * Returns: -ENODEV if no register could be found |
63 | */ | 63 | */ |
64 | static void speedstep_set_state (unsigned int state) | 64 | static int speedstep_find_register (void) |
65 | { | 65 | { |
66 | u32 pmbase; | 66 | if (!speedstep_chipset_dev) |
67 | u8 pm2_blk; | 67 | return -ENODEV; |
68 | u8 value; | ||
69 | unsigned long flags; | ||
70 | |||
71 | if (!speedstep_chipset_dev || (state > 0x1)) | ||
72 | return; | ||
73 | 68 | ||
74 | /* get PMBASE */ | 69 | /* get PMBASE */ |
75 | pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase); | 70 | pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase); |
76 | if (!(pmbase & 0x01)) { | 71 | if (!(pmbase & 0x01)) { |
77 | printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); | 72 | printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); |
78 | return; | 73 | return -ENODEV; |
79 | } | 74 | } |
80 | 75 | ||
81 | pmbase &= 0xFFFFFFFE; | 76 | pmbase &= 0xFFFFFFFE; |
82 | if (!pmbase) { | 77 | if (!pmbase) { |
83 | printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); | 78 | printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); |
84 | return; | 79 | return -ENODEV; |
85 | } | 80 | } |
86 | 81 | ||
82 | dprintk("pmbase is 0x%x\n", pmbase); | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * speedstep_set_state - set the SpeedStep state | ||
88 | * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) | ||
89 | * | ||
90 | * Tries to change the SpeedStep state. | ||
91 | */ | ||
92 | static void speedstep_set_state (unsigned int state) | ||
93 | { | ||
94 | u8 pm2_blk; | ||
95 | u8 value; | ||
96 | unsigned long flags; | ||
97 | |||
98 | if (state > 0x1) | ||
99 | return; | ||
100 | |||
87 | /* Disable IRQs */ | 101 | /* Disable IRQs */ |
88 | local_irq_save(flags); | 102 | local_irq_save(flags); |
89 | 103 | ||
@@ -315,10 +329,11 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
315 | cpus_allowed = current->cpus_allowed; | 329 | cpus_allowed = current->cpus_allowed; |
316 | set_cpus_allowed(current, policy->cpus); | 330 | set_cpus_allowed(current, policy->cpus); |
317 | 331 | ||
318 | /* detect low and high frequency */ | 332 | /* detect low and high frequency and transition latency */ |
319 | result = speedstep_get_freqs(speedstep_processor, | 333 | result = speedstep_get_freqs(speedstep_processor, |
320 | &speedstep_freqs[SPEEDSTEP_LOW].frequency, | 334 | &speedstep_freqs[SPEEDSTEP_LOW].frequency, |
321 | &speedstep_freqs[SPEEDSTEP_HIGH].frequency, | 335 | &speedstep_freqs[SPEEDSTEP_HIGH].frequency, |
336 | &policy->cpuinfo.transition_latency, | ||
322 | &speedstep_set_state); | 337 | &speedstep_set_state); |
323 | set_cpus_allowed(current, cpus_allowed); | 338 | set_cpus_allowed(current, cpus_allowed); |
324 | if (result) | 339 | if (result) |
@@ -335,7 +350,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
335 | 350 | ||
336 | /* cpuinfo and default policy values */ | 351 | /* cpuinfo and default policy values */ |
337 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | 352 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; |
338 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
339 | policy->cur = speed; | 353 | policy->cur = speed; |
340 | 354 | ||
341 | result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); | 355 | result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); |
@@ -400,6 +414,9 @@ static int __init speedstep_init(void) | |||
400 | return -EINVAL; | 414 | return -EINVAL; |
401 | } | 415 | } |
402 | 416 | ||
417 | if (speedstep_find_register()) | ||
418 | return -ENODEV; | ||
419 | |||
403 | return cpufreq_register_driver(&speedstep_driver); | 420 | return cpufreq_register_driver(&speedstep_driver); |
404 | } | 421 | } |
405 | 422 | ||
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c index d368b3f5fce8..7c47005a1805 100644 --- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c | |||
@@ -320,11 +320,13 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor); | |||
320 | unsigned int speedstep_get_freqs(unsigned int processor, | 320 | unsigned int speedstep_get_freqs(unsigned int processor, |
321 | unsigned int *low_speed, | 321 | unsigned int *low_speed, |
322 | unsigned int *high_speed, | 322 | unsigned int *high_speed, |
323 | unsigned int *transition_latency, | ||
323 | void (*set_state) (unsigned int state)) | 324 | void (*set_state) (unsigned int state)) |
324 | { | 325 | { |
325 | unsigned int prev_speed; | 326 | unsigned int prev_speed; |
326 | unsigned int ret = 0; | 327 | unsigned int ret = 0; |
327 | unsigned long flags; | 328 | unsigned long flags; |
329 | struct timeval tv1, tv2; | ||
328 | 330 | ||
329 | if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) | 331 | if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) |
330 | return -EINVAL; | 332 | return -EINVAL; |
@@ -337,7 +339,7 @@ unsigned int speedstep_get_freqs(unsigned int processor, | |||
337 | return -EIO; | 339 | return -EIO; |
338 | 340 | ||
339 | dprintk("previous speed is %u\n", prev_speed); | 341 | dprintk("previous speed is %u\n", prev_speed); |
340 | 342 | ||
341 | local_irq_save(flags); | 343 | local_irq_save(flags); |
342 | 344 | ||
343 | /* switch to low state */ | 345 | /* switch to low state */ |
@@ -350,8 +352,17 @@ unsigned int speedstep_get_freqs(unsigned int processor, | |||
350 | 352 | ||
351 | dprintk("low speed is %u\n", *low_speed); | 353 | dprintk("low speed is %u\n", *low_speed); |
352 | 354 | ||
355 | /* start latency measurement */ | ||
356 | if (transition_latency) | ||
357 | do_gettimeofday(&tv1); | ||
358 | |||
353 | /* switch to high state */ | 359 | /* switch to high state */ |
354 | set_state(SPEEDSTEP_HIGH); | 360 | set_state(SPEEDSTEP_HIGH); |
361 | |||
362 | /* end latency measurement */ | ||
363 | if (transition_latency) | ||
364 | do_gettimeofday(&tv2); | ||
365 | |||
355 | *high_speed = speedstep_get_processor_frequency(processor); | 366 | *high_speed = speedstep_get_processor_frequency(processor); |
356 | if (!*high_speed) { | 367 | if (!*high_speed) { |
357 | ret = -EIO; | 368 | ret = -EIO; |
@@ -369,6 +380,25 @@ unsigned int speedstep_get_freqs(unsigned int processor, | |||
369 | if (*high_speed != prev_speed) | 380 | if (*high_speed != prev_speed) |
370 | set_state(SPEEDSTEP_LOW); | 381 | set_state(SPEEDSTEP_LOW); |
371 | 382 | ||
383 | if (transition_latency) { | ||
384 | *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + | ||
385 | tv2.tv_usec - tv1.tv_usec; | ||
386 | dprintk("transition latency is %u uSec\n", *transition_latency); | ||
387 | |||
388 | /* convert uSec to nSec and add 20% for safety reasons */ | ||
389 | *transition_latency *= 1200; | ||
390 | |||
391 | /* check if the latency measurement is too high or too low | ||
392 | * and set it to a safe value (500uSec) in that case | ||
393 | */ | ||
394 | if (*transition_latency > 10000000 || *transition_latency < 50000) { | ||
395 | printk (KERN_WARNING "speedstep: frequency transition measured seems out of " | ||
396 | "range (%u nSec), falling back to a safe one of %u nSec.\n", | ||
397 | *transition_latency, 500000); | ||
398 | *transition_latency = 500000; | ||
399 | } | ||
400 | } | ||
401 | |||
372 | out: | 402 | out: |
373 | local_irq_restore(flags); | 403 | local_irq_restore(flags); |
374 | return (ret); | 404 | return (ret); |
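
The hunk above measures the low-to-high switch with do_gettimeofday(), converts the result from microseconds to nanoseconds with a 20% safety margin (the factor of 1200), and falls back to 500 usec when the measurement looks implausible. A stand-alone sketch of just that arithmetic, with a made-up measurement:

```c
#include <stdio.h>

/* Post-process a measured transition time the same way the hunk above
 * does: usec -> nsec with a 20% margin, replaced by a 500 usec default
 * when the result is outside 50 usec .. 10 msec.  (Illustration only.) */
static unsigned int cook_latency(unsigned int measured_usec)
{
	unsigned int latency = measured_usec * 1200;	/* usec -> nsec, +20% */

	if (latency > 10000000 || latency < 50000)
		latency = 500000;			/* safe default: 500 usec */
	return latency;
}

int main(void)
{
	printf("%u nsec\n", cook_latency(400));	/* 400 usec -> 480000 nsec */
	printf("%u nsec\n", cook_latency(20));	/* too short -> 500000 nsec */
	return 0;
}
```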
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h index 261a2c9b7f6b..6a727fd3a77e 100644 --- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h +++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h | |||
@@ -44,4 +44,5 @@ extern unsigned int speedstep_get_processor_frequency(unsigned int processor); | |||
44 | extern unsigned int speedstep_get_freqs(unsigned int processor, | 44 | extern unsigned int speedstep_get_freqs(unsigned int processor, |
45 | unsigned int *low_speed, | 45 | unsigned int *low_speed, |
46 | unsigned int *high_speed, | 46 | unsigned int *high_speed, |
47 | unsigned int *transition_latency, | ||
47 | void (*set_state) (unsigned int state)); | 48 | void (*set_state) (unsigned int state)); |
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c index 2718fb6f6aba..28cc5d524afc 100644 --- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c +++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c | |||
@@ -269,6 +269,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
269 | result = speedstep_get_freqs(speedstep_processor, | 269 | result = speedstep_get_freqs(speedstep_processor, |
270 | &speedstep_freqs[SPEEDSTEP_LOW].frequency, | 270 | &speedstep_freqs[SPEEDSTEP_LOW].frequency, |
271 | &speedstep_freqs[SPEEDSTEP_HIGH].frequency, | 271 | &speedstep_freqs[SPEEDSTEP_HIGH].frequency, |
272 | NULL, | ||
272 | &speedstep_set_state); | 273 | &speedstep_set_state); |
273 | 274 | ||
274 | if (result) { | 275 | if (result) { |
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c index ff87cc22b323..75015975d038 100644 --- a/arch/i386/kernel/cpu/cyrix.c +++ b/arch/i386/kernel/cpu/cyrix.c | |||
@@ -343,6 +343,31 @@ static void __init init_cyrix(struct cpuinfo_x86 *c) | |||
343 | } | 343 | } |
344 | 344 | ||
345 | /* | 345 | /* |
346 | * Handle National Semiconductor branded processors | ||
347 | */ | ||
348 | static void __devinit init_nsc(struct cpuinfo_x86 *c) | ||
349 | { | ||
350 | /* There may be GX1 processors in the wild that are branded | ||
351 | * NSC and not Cyrix. | ||
352 | * | ||
353 | * This function only handles the GX processor, and kicks every | ||
354 | * thing else to the Cyrix init function above - that should | ||
355 | * cover any processors that might have been branded differently | ||
356 | * after NSC acquired Cyrix. | ||
357 | * | ||
358 | * If this breaks your GX1 horribly, please e-mail | ||
359 | * info-linux@ldcmail.amd.com to tell us. | ||
360 | */ | ||
361 | |||
362 | /* Handle the GX (Formerly known as the GX2) */ | ||
363 | |||
364 | if (c->x86 == 5 && c->x86_model == 5) | ||
365 | display_cacheinfo(c); | ||
366 | else | ||
367 | init_cyrix(c); | ||
368 | } | ||
369 | |||
370 | /* | ||
346 | * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected | 371 | * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected |
347 | * by the fact that they preserve the flags across the division of 5/2. | 372 | * by the fact that they preserve the flags across the division of 5/2. |
348 | * PII and PPro exhibit this behavior too, but they have cpuid available. | 373 | * PII and PPro exhibit this behavior too, but they have cpuid available. |
@@ -422,7 +447,7 @@ int __init cyrix_init_cpu(void) | |||
422 | static struct cpu_dev nsc_cpu_dev __initdata = { | 447 | static struct cpu_dev nsc_cpu_dev __initdata = { |
423 | .c_vendor = "NSC", | 448 | .c_vendor = "NSC", |
424 | .c_ident = { "Geode by NSC" }, | 449 | .c_ident = { "Geode by NSC" }, |
425 | .c_init = init_cyrix, | 450 | .c_init = init_nsc, |
426 | .c_identify = generic_identify, | 451 | .c_identify = generic_identify, |
427 | }; | 452 | }; |
428 | 453 | ||
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c index e7921315ae9d..6d91b274589c 100644 --- a/arch/i386/kernel/cpu/proc.c +++ b/arch/i386/kernel/cpu/proc.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/string.h> | 3 | #include <linux/string.h> |
4 | #include <asm/semaphore.h> | 4 | #include <asm/semaphore.h> |
5 | #include <linux/seq_file.h> | 5 | #include <linux/seq_file.h> |
6 | #include <linux/cpufreq.h> | ||
6 | 7 | ||
7 | /* | 8 | /* |
8 | * Get CPU information for use by the procfs. | 9 | * Get CPU information for use by the procfs. |
@@ -86,8 +87,11 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
86 | seq_printf(m, "stepping\t: unknown\n"); | 87 | seq_printf(m, "stepping\t: unknown\n"); |
87 | 88 | ||
88 | if ( cpu_has(c, X86_FEATURE_TSC) ) { | 89 | if ( cpu_has(c, X86_FEATURE_TSC) ) { |
90 | unsigned int freq = cpufreq_quick_get(n); | ||
91 | if (!freq) | ||
92 | freq = cpu_khz; | ||
89 | seq_printf(m, "cpu MHz\t\t: %u.%03u\n", | 93 | seq_printf(m, "cpu MHz\t\t: %u.%03u\n", |
90 | cpu_khz / 1000, (cpu_khz % 1000)); | 94 | freq / 1000, (freq % 1000)); |
91 | } | 95 | } |
92 | 96 | ||
93 | /* Cache size */ | 97 | /* Cache size */ |
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c index 13bae799e626..006141d1c12a 100644 --- a/arch/i386/kernel/cpuid.c +++ b/arch/i386/kernel/cpuid.c | |||
@@ -117,14 +117,13 @@ static ssize_t cpuid_read(struct file *file, char __user *buf, | |||
117 | { | 117 | { |
118 | char __user *tmp = buf; | 118 | char __user *tmp = buf; |
119 | u32 data[4]; | 119 | u32 data[4]; |
120 | size_t rv; | ||
121 | u32 reg = *ppos; | 120 | u32 reg = *ppos; |
122 | int cpu = iminor(file->f_dentry->d_inode); | 121 | int cpu = iminor(file->f_dentry->d_inode); |
123 | 122 | ||
124 | if (count % 16) | 123 | if (count % 16) |
125 | return -EINVAL; /* Invalid chunk size */ | 124 | return -EINVAL; /* Invalid chunk size */ |
126 | 125 | ||
127 | for (rv = 0; count; count -= 16) { | 126 | for (; count; count -= 16) { |
128 | do_cpuid(cpu, reg, data); | 127 | do_cpuid(cpu, reg, data); |
129 | if (copy_to_user(tmp, &data, 16)) | 128 | if (copy_to_user(tmp, &data, 16)) |
130 | return -EFAULT; | 129 | return -EFAULT; |
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index e50b93155249..607c06007508 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S | |||
@@ -657,6 +657,7 @@ ENTRY(spurious_interrupt_bug) | |||
657 | pushl $do_spurious_interrupt_bug | 657 | pushl $do_spurious_interrupt_bug |
658 | jmp error_code | 658 | jmp error_code |
659 | 659 | ||
660 | .section .rodata,"a" | ||
660 | #include "syscall_table.S" | 661 | #include "syscall_table.S" |
661 | 662 | ||
662 | syscall_table_size=(.-sys_call_table) | 663 | syscall_table_size=(.-sys_call_table) |
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S index e437fb367498..5884469f6bfe 100644 --- a/arch/i386/kernel/head.S +++ b/arch/i386/kernel/head.S | |||
@@ -504,19 +504,24 @@ ENTRY(cpu_gdt_table) | |||
504 | .quad 0x0000000000000000 /* 0x80 TSS descriptor */ | 504 | .quad 0x0000000000000000 /* 0x80 TSS descriptor */ |
505 | .quad 0x0000000000000000 /* 0x88 LDT descriptor */ | 505 | .quad 0x0000000000000000 /* 0x88 LDT descriptor */ |
506 | 506 | ||
507 | /* Segments used for calling PnP BIOS */ | 507 | /* |
508 | .quad 0x00c09a0000000000 /* 0x90 32-bit code */ | 508 | * Segments used for calling PnP BIOS have byte granularity. |
509 | .quad 0x00809a0000000000 /* 0x98 16-bit code */ | 509 | * The code segments and data segments have fixed 64k limits, |
510 | .quad 0x0080920000000000 /* 0xa0 16-bit data */ | 510 | * the transfer segment sizes are set at run time. |
511 | .quad 0x0080920000000000 /* 0xa8 16-bit data */ | 511 | */ |
512 | .quad 0x0080920000000000 /* 0xb0 16-bit data */ | 512 | .quad 0x00409a000000ffff /* 0x90 32-bit code */ |
513 | .quad 0x00009a000000ffff /* 0x98 16-bit code */ | ||
514 | .quad 0x000092000000ffff /* 0xa0 16-bit data */ | ||
515 | .quad 0x0000920000000000 /* 0xa8 16-bit data */ | ||
516 | .quad 0x0000920000000000 /* 0xb0 16-bit data */ | ||
517 | |||
513 | /* | 518 | /* |
514 | * The APM segments have byte granularity and their bases | 519 | * The APM segments have byte granularity and their bases |
515 | * and limits are set at run time. | 520 | * are set at run time. All have 64k limits. |
516 | */ | 521 | */ |
517 | .quad 0x00409a0000000000 /* 0xb8 APM CS code */ | 522 | .quad 0x00409a000000ffff /* 0xb8 APM CS code */ |
518 | .quad 0x00009a0000000000 /* 0xc0 APM CS 16 code (16 bit) */ | 523 | .quad 0x00009a000000ffff /* 0xc0 APM CS 16 code (16 bit) */ |
519 | .quad 0x0040920000000000 /* 0xc8 APM DS data */ | 524 | .quad 0x004092000000ffff /* 0xc8 APM DS data */ |
520 | 525 | ||
521 | .quad 0x0000920000000000 /* 0xd0 - ESPFIX 16-bit SS */ | 526 | .quad 0x0000920000000000 /* 0xd0 - ESPFIX 16-bit SS */ |
522 | .quad 0x0000000000000000 /* 0xd8 - unused */ | 527 | .quad 0x0000000000000000 /* 0xd8 - unused */ |
@@ -525,3 +530,5 @@ ENTRY(cpu_gdt_table) | |||
525 | .quad 0x0000000000000000 /* 0xf0 - unused */ | 530 | .quad 0x0000000000000000 /* 0xf0 - unused */ |
526 | .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ | 531 | .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ |
527 | 532 | ||
533 | /* Be sure this is zeroed to avoid false validations in Xen */ | ||
534 | .fill PAGE_SIZE_asm / 8 - GDT_ENTRIES,8,0 | ||
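
Each .quad above encodes an x86 segment descriptor: limit bits 0-15 and 48-51, base bits 16-39 and 56-63, the access byte in bits 40-47, and the granularity/size flags in bits 52-55. As a side illustration (not part of the patch), a minimal decoder for such quadwords:

```c
#include <stdint.h>
#include <stdio.h>

/* Decode an x86 segment descriptor quadword into base, limit and
 * access fields, following the standard descriptor layout. */
static void decode_descriptor(uint64_t d)
{
	uint32_t base   = (uint32_t)((d >> 16) & 0xffffff) | (uint32_t)((d >> 56) & 0xff) << 24;
	uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 48) & 0xf) << 16;
	unsigned access = (d >> 40) & 0xff;		/* P, DPL, S, type */
	unsigned flags  = (d >> 52) & 0xf;		/* G, D/B, L, AVL */

	if (flags & 0x8)				/* G set: 4 KiB granularity */
		limit = (limit << 12) | 0xfff;

	printf("base=0x%08x limit=0x%08x access=0x%02x %s\n",
	       base, limit, access, (flags & 0x4) ? "32-bit" : "16-bit");
}

int main(void)
{
	decode_descriptor(0x00409a000000ffffULL);	/* 0xb8 APM CS: 64k 32-bit code */
	decode_descriptor(0x00009a000000ffffULL);	/* 0xc0 APM CS 16: 64k 16-bit code */
	decode_descriptor(0x004092000000ffffULL);	/* 0xc8 APM DS: 64k data */
	return 0;
}
```

Run against the new APM entries this shows what the reworded comment says: base 0 (patched in at run time), a fixed 64k byte-granular limit, and the expected code/data types.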
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c index 180f070d03cb..3999bec50c33 100644 --- a/arch/i386/kernel/i386_ksyms.c +++ b/arch/i386/kernel/i386_ksyms.c | |||
@@ -3,8 +3,7 @@ | |||
3 | #include <asm/checksum.h> | 3 | #include <asm/checksum.h> |
4 | #include <asm/desc.h> | 4 | #include <asm/desc.h> |
5 | 5 | ||
6 | /* This is definitely a GPL-only symbol */ | 6 | EXPORT_SYMBOL_GPL(cpu_gdt_descr); |
7 | EXPORT_SYMBOL_GPL(cpu_gdt_table); | ||
8 | 7 | ||
9 | EXPORT_SYMBOL(__down_failed); | 8 | EXPORT_SYMBOL(__down_failed); |
10 | EXPORT_SYMBOL(__down_failed_interruptible); | 9 | EXPORT_SYMBOL(__down_failed_interruptible); |
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 22c8675c79f4..7554f8fd874a 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c | |||
@@ -1722,8 +1722,8 @@ void disable_IO_APIC(void) | |||
1722 | entry.dest_mode = 0; /* Physical */ | 1722 | entry.dest_mode = 0; /* Physical */ |
1723 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ | 1723 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ |
1724 | entry.vector = 0; | 1724 | entry.vector = 0; |
1725 | entry.dest.physical.physical_dest = 0; | 1725 | entry.dest.physical.physical_dest = |
1726 | | 1726 | GET_APIC_ID(apic_read(APIC_ID));
1727 | 1727 | ||
1728 | /* | 1728 | /* |
1729 | * Add it to the IO-APIC irq-routing table: | 1729 | * Add it to the IO-APIC irq-routing table: |
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index 32b0c24ab9a6..19edcd526ba4 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c | |||
@@ -191,7 +191,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
191 | */ | 191 | */ |
192 | save_previous_kprobe(kcb); | 192 | save_previous_kprobe(kcb); |
193 | set_current_kprobe(p, regs, kcb); | 193 | set_current_kprobe(p, regs, kcb); |
194 | p->nmissed++; | 194 | kprobes_inc_nmissed_count(p); |
195 | prepare_singlestep(p, regs); | 195 | prepare_singlestep(p, regs); |
196 | kcb->kprobe_status = KPROBE_REENTER; | 196 | kcb->kprobe_status = KPROBE_REENTER; |
197 | return 1; | 197 | return 1; |
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c index d7cede83ba2e..0102f3d50e57 100644 --- a/arch/i386/kernel/mpparse.c +++ b/arch/i386/kernel/mpparse.c | |||
@@ -38,6 +38,12 @@ | |||
38 | int smp_found_config; | 38 | int smp_found_config; |
39 | unsigned int __initdata maxcpus = NR_CPUS; | 39 | unsigned int __initdata maxcpus = NR_CPUS; |
40 | 40 | ||
41 | #ifdef CONFIG_HOTPLUG_CPU | ||
42 | #define CPU_HOTPLUG_ENABLED (1) | ||
43 | #else | ||
44 | #define CPU_HOTPLUG_ENABLED (0) | ||
45 | #endif | ||
46 | |||
41 | /* | 47 | /* |
42 | * Various Linux-internal data structures created from the | 48 | * Various Linux-internal data structures created from the |
43 | * MP-table. | 49 | * MP-table. |
@@ -219,14 +225,18 @@ static void __devinit MP_processor_info (struct mpc_config_processor *m) | |||
219 | cpu_set(num_processors, cpu_possible_map); | 225 | cpu_set(num_processors, cpu_possible_map); |
220 | num_processors++; | 226 | num_processors++; |
221 | 227 | ||
222 | if ((num_processors > 8) && | 228 | if (CPU_HOTPLUG_ENABLED || (num_processors > 8)) { |
223 | ((APIC_XAPIC(ver) && | 229 | switch (boot_cpu_data.x86_vendor) { |
224 | (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) || | 230 | case X86_VENDOR_INTEL: |
225 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))) | 231 | if (!APIC_XAPIC(ver)) { |
226 | def_to_bigsmp = 1; | 232 | def_to_bigsmp = 0; |
227 | else | 233 | break; |
228 | def_to_bigsmp = 0; | 234 | } |
229 | | 235 | /* If P4 and above fall through */
236 | case X86_VENDOR_AMD: | ||
237 | def_to_bigsmp = 1; | ||
238 | } | ||
239 | } | ||
230 | bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; | 240 | bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; |
231 | } | 241 | } |
232 | 242 | ||
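
The new CPU_HOTPLUG_ENABLED macro folds CONFIG_HOTPLUG_CPU into a plain 0/1 constant so the hot-plug case can be tested in ordinary C instead of an #ifdef block; the compiler still parses both branches and then discards the dead one. A minimal stand-alone sketch of the same pattern, with made-up names:

```c
#include <stdio.h>

/* Fold a build-time option into a 0/1 constant (illustrative names). */
#ifdef CONFIG_EXAMPLE_HOTPLUG
#define EXAMPLE_HOTPLUG_ENABLED (1)
#else
#define EXAMPLE_HOTPLUG_ENABLED (0)
#endif

static void pick_apic_model(int num_processors)
{
	/* Both branches compile and get type-checked; with the option off,
	 * the constant condition lets the compiler drop the first branch. */
	if (EXAMPLE_HOTPLUG_ENABLED || num_processors > 8)
		printf("defaulting to the bigsmp APIC model\n");
	else
		printf("keeping the default APIC model\n");
}

int main(void)
{
	pick_apic_model(4);
	return 0;
}
```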
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c index 44470fea4309..1d0a55e68760 100644 --- a/arch/i386/kernel/msr.c +++ b/arch/i386/kernel/msr.c | |||
@@ -172,7 +172,6 @@ static ssize_t msr_read(struct file *file, char __user * buf, | |||
172 | { | 172 | { |
173 | u32 __user *tmp = (u32 __user *) buf; | 173 | u32 __user *tmp = (u32 __user *) buf; |
174 | u32 data[2]; | 174 | u32 data[2]; |
175 | size_t rv; | ||
176 | u32 reg = *ppos; | 175 | u32 reg = *ppos; |
177 | int cpu = iminor(file->f_dentry->d_inode); | 176 | int cpu = iminor(file->f_dentry->d_inode); |
178 | int err; | 177 | int err; |
@@ -180,7 +179,7 @@ static ssize_t msr_read(struct file *file, char __user * buf, | |||
180 | if (count % 8) | 179 | if (count % 8) |
181 | return -EINVAL; /* Invalid chunk size */ | 180 | return -EINVAL; /* Invalid chunk size */ |
182 | 181 | ||
183 | for (rv = 0; count; count -= 8) { | 182 | for (; count; count -= 8) { |
184 | err = do_rdmsr(cpu, reg, &data[0], &data[1]); | 183 | err = do_rdmsr(cpu, reg, &data[0], &data[1]); |
185 | if (err) | 184 | if (err) |
186 | return err; | 185 | return err; |
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index df6c2bcde067..45e7f0ac4b04 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c | |||
@@ -308,9 +308,7 @@ void show_regs(struct pt_regs * regs) | |||
308 | cr0 = read_cr0(); | 308 | cr0 = read_cr0(); |
309 | cr2 = read_cr2(); | 309 | cr2 = read_cr2(); |
310 | cr3 = read_cr3(); | 310 | cr3 = read_cr3(); |
311 | if (current_cpu_data.x86 > 4) { | 311 | cr4 = read_cr4_safe(); |
312 | cr4 = read_cr4(); | ||
313 | } | ||
314 | printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); | 312 | printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); |
315 | show_trace(NULL, &regs->esp); | 313 | show_trace(NULL, &regs->esp); |
316 | } | 314 | } |
@@ -404,17 +402,7 @@ void flush_thread(void) | |||
404 | 402 | ||
405 | void release_thread(struct task_struct *dead_task) | 403 | void release_thread(struct task_struct *dead_task) |
406 | { | 404 | { |
407 | if (dead_task->mm) { | 405 | BUG_ON(dead_task->mm); |
408 | // temporary debugging check | ||
409 | if (dead_task->mm->context.size) { | ||
410 | printk("WARNING: dead process %8s still has LDT? <%p/%d>\n", | ||
411 | dead_task->comm, | ||
412 | dead_task->mm->context.ldt, | ||
413 | dead_task->mm->context.size); | ||
414 | BUG(); | ||
415 | } | ||
416 | } | ||
417 | |||
418 | release_vm86_irqs(dead_task); | 406 | release_vm86_irqs(dead_task); |
419 | } | 407 | } |
420 | 408 | ||
@@ -554,7 +542,9 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) | |||
554 | struct pt_regs ptregs; | 542 | struct pt_regs ptregs; |
555 | 543 | ||
556 | ptregs = *(struct pt_regs *) | 544 | ptregs = *(struct pt_regs *) |
557 | ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs)); | 545 | ((unsigned long)tsk->thread_info + |
546 | /* see comments in copy_thread() about -8 */ | ||
547 | THREAD_SIZE - sizeof(ptregs) - 8); | ||
558 | ptregs.xcs &= 0xffff; | 548 | ptregs.xcs &= 0xffff; |
559 | ptregs.xds &= 0xffff; | 549 | ptregs.xds &= 0xffff; |
560 | ptregs.xes &= 0xffff; | 550 | ptregs.xes &= 0xffff; |
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c index 5ffbb4b7ad05..5c1fb6aada5b 100644 --- a/arch/i386/kernel/ptrace.c +++ b/arch/i386/kernel/ptrace.c | |||
@@ -32,9 +32,12 @@ | |||
32 | * in exit.c or in signal.c. | 32 | * in exit.c or in signal.c. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | /* determines which flags the user has access to. */ | 35 | /* |
36 | /* 1 = access 0 = no access */ | 36 | * Determines which flags the user has access to [1 = access, 0 = no access]. |
37 | #define FLAG_MASK 0x00044dd5 | 37 | * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9). |
38 | * Also masks reserved bits (31-22, 15, 5, 3, 1). | ||
39 | */ | ||
40 | #define FLAG_MASK 0x00054dd5 | ||
38 | 41 | ||
39 | /* set's the trap flag. */ | 42 | /* set's the trap flag. */ |
40 | #define TRAP_FLAG 0x100 | 43 | #define TRAP_FLAG 0x100 |
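
The widened FLAG_MASK additionally lets a tracer change RF (bit 16) while still blocking IF, IOPL, VM and the VIF/VIP/ID bits. When user space writes EFLAGS through ptrace, a mask like this is merged with the saved register image so that only the permitted bits come from the caller; a small stand-alone sketch of that merge (the helper name and values are made up):

```c
#include <stdio.h>

#define FLAG_MASK 0x00054dd5	/* user-writable EFLAGS bits, per the comment above */
#define TRAP_FLAG 0x100		/* TF */

/* Illustrative helper (not part of the patch): take only FLAG_MASK
 * bits from the value supplied by the tracer, keep the rest from the
 * kernel-saved EFLAGS image. */
static unsigned long merge_eflags(unsigned long saved, unsigned long user_value)
{
	return (saved & ~(unsigned long)FLAG_MASK) | (user_value & FLAG_MASK);
}

int main(void)
{
	unsigned long saved = 0x00000246;	/* a typical saved EFLAGS value */

	/* The IOPL=3 and IF requests are masked out; the TF (single-step)
	 * request is honoured. */
	printf("0x%08lx\n", merge_eflags(saved, 0x3200 | TRAP_FLAG));
	return 0;
}
```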
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c index 2afe0f8d555a..2fa5803a759d 100644 --- a/arch/i386/kernel/reboot.c +++ b/arch/i386/kernel/reboot.c | |||
@@ -111,12 +111,12 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
111 | DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"), | 111 | DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"), |
112 | }, | 112 | }, |
113 | }, | 113 | }, |
114 | { /* Handle problems with rebooting on HP nc6120 */ | 114 | { /* Handle problems with rebooting on HP laptops */ |
115 | .callback = set_bios_reboot, | 115 | .callback = set_bios_reboot, |
116 | .ident = "HP Compaq nc6120", | 116 | .ident = "HP Compaq Laptop", |
117 | .matches = { | 117 | .matches = { |
118 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | 118 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), |
119 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6120"), | 119 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), |
120 | }, | 120 | }, |
121 | }, | 121 | }, |
122 | { } | 122 | { } |
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index fdfcb0cba9b4..27c956db0461 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c | |||
@@ -954,6 +954,12 @@ efi_find_max_pfn(unsigned long start, unsigned long end, void *arg) | |||
954 | return 0; | 954 | return 0; |
955 | } | 955 | } |
956 | 956 | ||
957 | static int __init | ||
958 | efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg) | ||
959 | { | ||
960 | memory_present(0, start, end); | ||
961 | return 0; | ||
962 | } | ||
957 | 963 | ||
958 | /* | 964 | /* |
959 | * Find the highest page frame number we have available | 965 | * Find the highest page frame number we have available |
@@ -965,6 +971,7 @@ void __init find_max_pfn(void) | |||
965 | max_pfn = 0; | 971 | max_pfn = 0; |
966 | if (efi_enabled) { | 972 | if (efi_enabled) { |
967 | efi_memmap_walk(efi_find_max_pfn, &max_pfn); | 973 | efi_memmap_walk(efi_find_max_pfn, &max_pfn); |
974 | efi_memmap_walk(efi_memory_present_wrapper, NULL); | ||
968 | return; | 975 | return; |
969 | } | 976 | } |
970 | 977 | ||
@@ -979,6 +986,7 @@ void __init find_max_pfn(void) | |||
979 | continue; | 986 | continue; |
980 | if (end > max_pfn) | 987 | if (end > max_pfn) |
981 | max_pfn = end; | 988 | max_pfn = end; |
989 | memory_present(0, start, end); | ||
982 | } | 990 | } |
983 | } | 991 | } |
984 | 992 | ||
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index d16520da4550..b3c2e2c26743 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
@@ -903,6 +903,12 @@ static int __devinit do_boot_cpu(int apicid, int cpu) | |||
903 | unsigned long start_eip; | 903 | unsigned long start_eip; |
904 | unsigned short nmi_high = 0, nmi_low = 0; | 904 | unsigned short nmi_high = 0, nmi_low = 0; |
905 | 905 | ||
906 | if (!cpu_gdt_descr[cpu].address && | ||
907 | !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) { | ||
908 | printk("Failed to allocate GDT for CPU %d\n", cpu); | ||
909 | return 1; | ||
910 | } | ||
911 | |||
906 | ++cpucount; | 912 | ++cpucount; |
907 | 913 | ||
908 | /* | 914 | /* |
@@ -1338,8 +1344,7 @@ int __cpu_disable(void) | |||
1338 | if (cpu == 0) | 1344 | if (cpu == 0) |
1339 | return -EBUSY; | 1345 | return -EBUSY; |
1340 | 1346 | ||
1341 | /* We enable the timer again on the exit path of the death loop */ | 1347 | clear_local_APIC(); |
1342 | disable_APIC_timer(); | ||
1343 | /* Allow any queued timer interrupts to get serviced */ | 1348 | /* Allow any queued timer interrupts to get serviced */ |
1344 | local_irq_enable(); | 1349 | local_irq_enable(); |
1345 | mdelay(1); | 1350 | mdelay(1); |
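
The check added to do_boot_cpu() allocates the secondary CPU's GDT page lazily, folding the allocation into the if() condition: the second operand only runs when the page is still missing, and the boot attempt is abandoned if get_zeroed_page() fails. The same shape in a stand-alone form, with illustrative names and calloc() standing in for the kernel allocator:

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_CPUS 8

static void *gdt_page[MAX_CPUS];	/* stand-in for cpu_gdt_descr[cpu].address */

static int prepare_gdt(int cpu)
{
	/* Allocate on first use only; bail out if the allocation fails. */
	if (!gdt_page[cpu] && !(gdt_page[cpu] = calloc(1, 4096))) {
		fprintf(stderr, "Failed to allocate GDT for CPU %d\n", cpu);
		return 1;
	}
	return 0;
}

int main(void)
{
	return prepare_gdt(1);
}
```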
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S index 9b21a31d4f4e..f7ba4acc20ec 100644 --- a/arch/i386/kernel/syscall_table.S +++ b/arch/i386/kernel/syscall_table.S | |||
@@ -1,4 +1,3 @@ | |||
1 | .data | ||
2 | ENTRY(sys_call_table) | 1 | ENTRY(sys_call_table) |
3 | .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ | 2 | .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ |
4 | .long sys_exit | 3 | .long sys_exit |
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c index d395e3b42485..47675bbbb316 100644 --- a/arch/i386/kernel/timers/timer_tsc.c +++ b/arch/i386/kernel/timers/timer_tsc.c | |||
@@ -330,7 +330,9 @@ int recalibrate_cpu_khz(void) | |||
330 | unsigned int cpu_khz_old = cpu_khz; | 330 | unsigned int cpu_khz_old = cpu_khz; |
331 | 331 | ||
332 | if (cpu_has_tsc) { | 332 | if (cpu_has_tsc) { |
333 | local_irq_disable(); | ||
333 | init_cpu_khz(); | 334 | init_cpu_khz(); |
335 | local_irq_enable(); | ||
334 | cpu_data[0].loops_per_jiffy = | 336 | cpu_data[0].loops_per_jiffy = |
335 | cpufreq_scale(cpu_data[0].loops_per_jiffy, | 337 | cpufreq_scale(cpu_data[0].loops_per_jiffy, |
336 | cpu_khz_old, | 338 | cpu_khz_old, |
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index c34d1bfc5161..53ad954e3ba4 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c | |||
@@ -306,14 +306,17 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
306 | .lock_owner_depth = 0 | 306 | .lock_owner_depth = 0 |
307 | }; | 307 | }; |
308 | static int die_counter; | 308 | static int die_counter; |
309 | unsigned long flags; | ||
309 | 310 | ||
310 | if (die.lock_owner != raw_smp_processor_id()) { | 311 | if (die.lock_owner != raw_smp_processor_id()) { |
311 | console_verbose(); | 312 | console_verbose(); |
312 | spin_lock_irq(&die.lock); | 313 | spin_lock_irqsave(&die.lock, flags); |
313 | die.lock_owner = smp_processor_id(); | 314 | die.lock_owner = smp_processor_id(); |
314 | die.lock_owner_depth = 0; | 315 | die.lock_owner_depth = 0; |
315 | bust_spinlocks(1); | 316 | bust_spinlocks(1); |
316 | } | 317 | } |
318 | else | ||
319 | local_save_flags(flags); | ||
317 | 320 | ||
318 | if (++die.lock_owner_depth < 3) { | 321 | if (++die.lock_owner_depth < 3) { |
319 | int nl = 0; | 322 | int nl = 0; |
@@ -340,7 +343,7 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
340 | 343 | ||
341 | bust_spinlocks(0); | 344 | bust_spinlocks(0); |
342 | die.lock_owner = -1; | 345 | die.lock_owner = -1; |
343 | spin_unlock_irq(&die.lock); | 346 | spin_unlock_irqrestore(&die.lock, flags); |
344 | 347 | ||
345 | if (kexec_should_crash(current)) | 348 | if (kexec_should_crash(current)) |
346 | crash_kexec(regs); | 349 | crash_kexec(regs); |
@@ -452,7 +455,7 @@ DO_VM86_ERROR( 3, SIGTRAP, "int3", int3) | |||
452 | #endif | 455 | #endif |
453 | DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow) | 456 | DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow) |
454 | DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds) | 457 | DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds) |
455 | DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip) | 458 | DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip) |
456 | DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) | 459 | DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) |
457 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) | 460 | DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) |
458 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) | 461 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) |
@@ -650,13 +653,6 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code) | |||
650 | 653 | ||
651 | cpu = smp_processor_id(); | 654 | cpu = smp_processor_id(); |
652 | 655 | ||
653 | #ifdef CONFIG_HOTPLUG_CPU | ||
654 | if (!cpu_online(cpu)) { | ||
655 | nmi_exit(); | ||
656 | return; | ||
657 | } | ||
658 | #endif | ||
659 | |||
660 | ++nmi_count(cpu); | 656 | ++nmi_count(cpu); |
661 | 657 | ||
662 | if (!rcu_dereference(nmi_callback)(regs, cpu)) | 658 | if (!rcu_dereference(nmi_callback)(regs, cpu)) |
@@ -1082,9 +1078,9 @@ void __init trap_init(void) | |||
1082 | set_trap_gate(0,&divide_error); | 1078 | set_trap_gate(0,&divide_error); |
1083 | set_intr_gate(1,&debug); | 1079 | set_intr_gate(1,&debug); |
1084 | set_intr_gate(2,&nmi); | 1080 | set_intr_gate(2,&nmi); |
1085 | set_system_intr_gate(3, &int3); /* int3-5 can be called from all */ | 1081 | set_system_intr_gate(3, &int3); /* int3/4 can be called from all */ |
1086 | set_system_gate(4,&overflow); | 1082 | set_system_gate(4,&overflow); |
1087 | set_system_gate(5,&bounds); | 1083 | set_trap_gate(5,&bounds); |
1088 | set_trap_gate(6,&invalid_op); | 1084 | set_trap_gate(6,&invalid_op); |
1089 | set_trap_gate(7,&device_not_available); | 1085 | set_trap_gate(7,&device_not_available); |
1090 | set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS); | 1086 | set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS); |
@@ -1102,6 +1098,28 @@ void __init trap_init(void) | |||
1102 | #endif | 1098 | #endif |
1103 | set_trap_gate(19,&simd_coprocessor_error); | 1099 | set_trap_gate(19,&simd_coprocessor_error); |
1104 | 1100 | ||
1101 | if (cpu_has_fxsr) { | ||
1102 | /* | ||
1103 | * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. | ||
1104 | * Generates a compile-time "error: zero width for bit-field" if | ||
1105 | * the alignment is wrong. | ||
1106 | */ | ||
1107 | struct fxsrAlignAssert { | ||
1108 | int _:!(offsetof(struct task_struct, | ||
1109 | thread.i387.fxsave) & 15); | ||
1110 | }; | ||
1111 | |||
1112 | printk(KERN_INFO "Enabling fast FPU save and restore... "); | ||
1113 | set_in_cr4(X86_CR4_OSFXSR); | ||
1114 | printk("done.\n"); | ||
1115 | } | ||
1116 | if (cpu_has_xmm) { | ||
1117 | printk(KERN_INFO "Enabling unmasked SIMD FPU exception " | ||
1118 | "support... "); | ||
1119 | set_in_cr4(X86_CR4_OSXMMEXCPT); | ||
1120 | printk("done.\n"); | ||
1121 | } | ||
1122 | |||
1105 | set_system_gate(SYSCALL_VECTOR,&system_call); | 1123 | set_system_gate(SYSCALL_VECTOR,&system_call); |
1106 | 1124 | ||
1107 | /* | 1125 | /* |