diff options
Diffstat (limited to 'arch/i386/kernel')
30 files changed, 513 insertions, 302 deletions
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index 5546ddebec33..9204be6eedb3 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c | |||
@@ -803,6 +803,7 @@ no_apic: | |||
803 | 803 | ||
804 | void __init init_apic_mappings(void) | 804 | void __init init_apic_mappings(void) |
805 | { | 805 | { |
806 | unsigned int orig_apicid; | ||
806 | unsigned long apic_phys; | 807 | unsigned long apic_phys; |
807 | 808 | ||
808 | /* | 809 | /* |
@@ -824,8 +825,11 @@ void __init init_apic_mappings(void) | |||
824 | * Fetch the APIC ID of the BSP in case we have a | 825 | * Fetch the APIC ID of the BSP in case we have a |
825 | * default configuration (or the MP table is broken). | 826 | * default configuration (or the MP table is broken). |
826 | */ | 827 | */ |
827 | if (boot_cpu_physical_apicid == -1U) | 828 | orig_apicid = boot_cpu_physical_apicid; |
828 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | 829 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); |
830 | if ((orig_apicid != -1U) && (orig_apicid != boot_cpu_physical_apicid)) | ||
831 | printk(KERN_WARNING "Boot APIC ID in local APIC unexpected (%d vs %d)", | ||
832 | orig_apicid, boot_cpu_physical_apicid); | ||
829 | 833 | ||
830 | #ifdef CONFIG_X86_IO_APIC | 834 | #ifdef CONFIG_X86_IO_APIC |
831 | { | 835 | { |
@@ -1046,10 +1050,11 @@ static unsigned int calibration_result; | |||
1046 | 1050 | ||
1047 | void __init setup_boot_APIC_clock(void) | 1051 | void __init setup_boot_APIC_clock(void) |
1048 | { | 1052 | { |
1053 | unsigned long flags; | ||
1049 | apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"); | 1054 | apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"); |
1050 | using_apic_timer = 1; | 1055 | using_apic_timer = 1; |
1051 | 1056 | ||
1052 | local_irq_disable(); | 1057 | local_irq_save(flags); |
1053 | 1058 | ||
1054 | calibration_result = calibrate_APIC_clock(); | 1059 | calibration_result = calibrate_APIC_clock(); |
1055 | /* | 1060 | /* |
@@ -1057,7 +1062,7 @@ void __init setup_boot_APIC_clock(void) | |||
1057 | */ | 1062 | */ |
1058 | setup_APIC_timer(calibration_result); | 1063 | setup_APIC_timer(calibration_result); |
1059 | 1064 | ||
1060 | local_irq_enable(); | 1065 | local_irq_restore(flags); |
1061 | } | 1066 | } |
1062 | 1067 | ||
1063 | void __devinit setup_secondary_APIC_clock(void) | 1068 | void __devinit setup_secondary_APIC_clock(void) |
@@ -1254,40 +1259,81 @@ fastcall void smp_error_interrupt(struct pt_regs *regs) | |||
1254 | } | 1259 | } |
1255 | 1260 | ||
1256 | /* | 1261 | /* |
1257 | * This initializes the IO-APIC and APIC hardware if this is | 1262 | * This initializes the IO-APIC and APIC hardware. |
1258 | * a UP kernel. | ||
1259 | */ | 1263 | */ |
1260 | int __init APIC_init_uniprocessor (void) | 1264 | int __init APIC_init(void) |
1261 | { | 1265 | { |
1262 | if (enable_local_apic < 0) | 1266 | if (enable_local_apic < 0) { |
1263 | clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); | 1267 | printk(KERN_INFO "APIC disabled\n"); |
1268 | return -1; | ||
1269 | } | ||
1264 | 1270 | ||
1265 | if (!smp_found_config && !cpu_has_apic) | 1271 | /* See if we have a SMP configuration or have forced enabled |
1272 | * the local apic. | ||
1273 | */ | ||
1274 | if (!smp_found_config && !acpi_lapic && !cpu_has_apic) { | ||
1275 | enable_local_apic = -1; | ||
1266 | return -1; | 1276 | return -1; |
1277 | } | ||
1267 | 1278 | ||
1268 | /* | 1279 | /* |
1269 | * Complain if the BIOS pretends there is one. | 1280 | * Complain if the BIOS pretends there is an apic. |
1281 | * Then get out because we don't have an a local apic. | ||
1270 | */ | 1282 | */ |
1271 | if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { | 1283 | if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { |
1272 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | 1284 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", |
1273 | boot_cpu_physical_apicid); | 1285 | boot_cpu_physical_apicid); |
1286 | printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); | ||
1287 | enable_local_apic = -1; | ||
1274 | return -1; | 1288 | return -1; |
1275 | } | 1289 | } |
1276 | 1290 | ||
1277 | verify_local_APIC(); | 1291 | verify_local_APIC(); |
1278 | 1292 | ||
1293 | /* | ||
1294 | * Should not be necessary because the MP table should list the boot | ||
1295 | * CPU too, but we do it for the sake of robustness anyway. | ||
1296 | * Makes no sense to do this check in clustered apic mode, so skip it | ||
1297 | */ | ||
1298 | if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { | ||
1299 | printk("weird, boot CPU (#%d) not listed by the BIOS.\n", | ||
1300 | boot_cpu_physical_apicid); | ||
1301 | physid_set(boot_cpu_physical_apicid, phys_cpu_present_map); | ||
1302 | } | ||
1303 | |||
1304 | /* | ||
1305 | * Switch from PIC to APIC mode. | ||
1306 | */ | ||
1279 | connect_bsp_APIC(); | 1307 | connect_bsp_APIC(); |
1308 | setup_local_APIC(); | ||
1280 | 1309 | ||
1281 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); | 1310 | #ifdef CONFIG_X86_IO_APIC |
1311 | /* | ||
1312 | * Now start the IO-APICs | ||
1313 | */ | ||
1314 | if (smp_found_config && !skip_ioapic_setup && nr_ioapics) | ||
1315 | setup_IO_APIC(); | ||
1316 | #endif | ||
1317 | return 0; | ||
1318 | } | ||
1282 | 1319 | ||
1283 | setup_local_APIC(); | 1320 | void __init APIC_late_time_init(void) |
1321 | { | ||
1322 | /* Improve our loops per jiffy estimate */ | ||
1323 | loops_per_jiffy = ((1000 + HZ - 1)/HZ)*cpu_khz; | ||
1324 | boot_cpu_data.loops_per_jiffy = loops_per_jiffy; | ||
1325 | cpu_data[0].loops_per_jiffy = loops_per_jiffy; | ||
1326 | |||
1327 | /* setup_apic_nmi_watchdog doesn't work properly before cpu_khz is | ||
1328 | * initialized. So redo it here to ensure the boot cpu is setup | ||
1329 | * properly. | ||
1330 | */ | ||
1331 | if (nmi_watchdog == NMI_LOCAL_APIC) | ||
1332 | setup_apic_nmi_watchdog(); | ||
1284 | 1333 | ||
1285 | #ifdef CONFIG_X86_IO_APIC | 1334 | #ifdef CONFIG_X86_IO_APIC |
1286 | if (smp_found_config) | 1335 | if (smp_found_config && !skip_ioapic_setup && nr_ioapics) |
1287 | if (!skip_ioapic_setup && nr_ioapics) | 1336 | IO_APIC_late_time_init(); |
1288 | setup_IO_APIC(); | ||
1289 | #endif | 1337 | #endif |
1290 | setup_boot_APIC_clock(); | 1338 | setup_boot_APIC_clock(); |
1291 | |||
1292 | return 0; | ||
1293 | } | 1339 | } |
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c index d7811c4e8b50..d2ef0c2aa93e 100644 --- a/arch/i386/kernel/apm.c +++ b/arch/i386/kernel/apm.c | |||
@@ -597,12 +597,14 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in, | |||
597 | cpumask_t cpus; | 597 | cpumask_t cpus; |
598 | int cpu; | 598 | int cpu; |
599 | struct desc_struct save_desc_40; | 599 | struct desc_struct save_desc_40; |
600 | struct desc_struct *gdt; | ||
600 | 601 | ||
601 | cpus = apm_save_cpus(); | 602 | cpus = apm_save_cpus(); |
602 | 603 | ||
603 | cpu = get_cpu(); | 604 | cpu = get_cpu(); |
604 | save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8]; | 605 | gdt = get_cpu_gdt_table(cpu); |
605 | per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc; | 606 | save_desc_40 = gdt[0x40 / 8]; |
607 | gdt[0x40 / 8] = bad_bios_desc; | ||
606 | 608 | ||
607 | local_save_flags(flags); | 609 | local_save_flags(flags); |
608 | APM_DO_CLI; | 610 | APM_DO_CLI; |
@@ -610,7 +612,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in, | |||
610 | apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi); | 612 | apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi); |
611 | APM_DO_RESTORE_SEGS; | 613 | APM_DO_RESTORE_SEGS; |
612 | local_irq_restore(flags); | 614 | local_irq_restore(flags); |
613 | per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = save_desc_40; | 615 | gdt[0x40 / 8] = save_desc_40; |
614 | put_cpu(); | 616 | put_cpu(); |
615 | apm_restore_cpus(cpus); | 617 | apm_restore_cpus(cpus); |
616 | 618 | ||
@@ -639,13 +641,14 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax) | |||
639 | cpumask_t cpus; | 641 | cpumask_t cpus; |
640 | int cpu; | 642 | int cpu; |
641 | struct desc_struct save_desc_40; | 643 | struct desc_struct save_desc_40; |
642 | 644 | struct desc_struct *gdt; | |
643 | 645 | ||
644 | cpus = apm_save_cpus(); | 646 | cpus = apm_save_cpus(); |
645 | 647 | ||
646 | cpu = get_cpu(); | 648 | cpu = get_cpu(); |
647 | save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8]; | 649 | gdt = get_cpu_gdt_table(cpu); |
648 | per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc; | 650 | save_desc_40 = gdt[0x40 / 8]; |
651 | gdt[0x40 / 8] = bad_bios_desc; | ||
649 | 652 | ||
650 | local_save_flags(flags); | 653 | local_save_flags(flags); |
651 | APM_DO_CLI; | 654 | APM_DO_CLI; |
@@ -653,7 +656,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax) | |||
653 | error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax); | 656 | error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax); |
654 | APM_DO_RESTORE_SEGS; | 657 | APM_DO_RESTORE_SEGS; |
655 | local_irq_restore(flags); | 658 | local_irq_restore(flags); |
656 | __get_cpu_var(cpu_gdt_table)[0x40 / 8] = save_desc_40; | 659 | gdt[0x40 / 8] = save_desc_40; |
657 | put_cpu(); | 660 | put_cpu(); |
658 | apm_restore_cpus(cpus); | 661 | apm_restore_cpus(cpus); |
659 | return error; | 662 | return error; |
@@ -2295,35 +2298,36 @@ static int __init apm_init(void) | |||
2295 | apm_bios_entry.segment = APM_CS; | 2298 | apm_bios_entry.segment = APM_CS; |
2296 | 2299 | ||
2297 | for (i = 0; i < NR_CPUS; i++) { | 2300 | for (i = 0; i < NR_CPUS; i++) { |
2298 | set_base(per_cpu(cpu_gdt_table, i)[APM_CS >> 3], | 2301 | struct desc_struct *gdt = get_cpu_gdt_table(i); |
2302 | set_base(gdt[APM_CS >> 3], | ||
2299 | __va((unsigned long)apm_info.bios.cseg << 4)); | 2303 | __va((unsigned long)apm_info.bios.cseg << 4)); |
2300 | set_base(per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], | 2304 | set_base(gdt[APM_CS_16 >> 3], |
2301 | __va((unsigned long)apm_info.bios.cseg_16 << 4)); | 2305 | __va((unsigned long)apm_info.bios.cseg_16 << 4)); |
2302 | set_base(per_cpu(cpu_gdt_table, i)[APM_DS >> 3], | 2306 | set_base(gdt[APM_DS >> 3], |
2303 | __va((unsigned long)apm_info.bios.dseg << 4)); | 2307 | __va((unsigned long)apm_info.bios.dseg << 4)); |
2304 | #ifndef APM_RELAX_SEGMENTS | 2308 | #ifndef APM_RELAX_SEGMENTS |
2305 | if (apm_info.bios.version == 0x100) { | 2309 | if (apm_info.bios.version == 0x100) { |
2306 | #endif | 2310 | #endif |
2307 | /* For ASUS motherboard, Award BIOS rev 110 (and others?) */ | 2311 | /* For ASUS motherboard, Award BIOS rev 110 (and others?) */ |
2308 | _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 - 1); | 2312 | _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1); |
2309 | /* For some unknown machine. */ | 2313 | /* For some unknown machine. */ |
2310 | _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 64 * 1024 - 1); | 2314 | _set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1); |
2311 | /* For the DEC Hinote Ultra CT475 (and others?) */ | 2315 | /* For the DEC Hinote Ultra CT475 (and others?) */ |
2312 | _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 64 * 1024 - 1); | 2316 | _set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1); |
2313 | #ifndef APM_RELAX_SEGMENTS | 2317 | #ifndef APM_RELAX_SEGMENTS |
2314 | } else { | 2318 | } else { |
2315 | _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], | 2319 | _set_limit((char *)&gdt[APM_CS >> 3], |
2316 | (apm_info.bios.cseg_len - 1) & 0xffff); | 2320 | (apm_info.bios.cseg_len - 1) & 0xffff); |
2317 | _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], | 2321 | _set_limit((char *)&gdt[APM_CS_16 >> 3], |
2318 | (apm_info.bios.cseg_16_len - 1) & 0xffff); | 2322 | (apm_info.bios.cseg_16_len - 1) & 0xffff); |
2319 | _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], | 2323 | _set_limit((char *)&gdt[APM_DS >> 3], |
2320 | (apm_info.bios.dseg_len - 1) & 0xffff); | 2324 | (apm_info.bios.dseg_len - 1) & 0xffff); |
2321 | /* workaround for broken BIOSes */ | 2325 | /* workaround for broken BIOSes */ |
2322 | if (apm_info.bios.cseg_len <= apm_info.bios.offset) | 2326 | if (apm_info.bios.cseg_len <= apm_info.bios.offset) |
2323 | _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 -1); | 2327 | _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1); |
2324 | if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */ | 2328 | if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */ |
2325 | /* for the BIOS that assumes granularity = 1 */ | 2329 | /* for the BIOS that assumes granularity = 1 */ |
2326 | per_cpu(cpu_gdt_table, i)[APM_DS >> 3].b |= 0x800000; | 2330 | gdt[APM_DS >> 3].b |= 0x800000; |
2327 | printk(KERN_NOTICE "apm: we set the granularity of dseg.\n"); | 2331 | printk(KERN_NOTICE "apm: we set the granularity of dseg.\n"); |
2328 | } | 2332 | } |
2329 | } | 2333 | } |
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 9ad43be9a01f..74145a33cb0f 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c | |||
@@ -573,6 +573,7 @@ void __devinit cpu_init(void) | |||
573 | int cpu = smp_processor_id(); | 573 | int cpu = smp_processor_id(); |
574 | struct tss_struct * t = &per_cpu(init_tss, cpu); | 574 | struct tss_struct * t = &per_cpu(init_tss, cpu); |
575 | struct thread_struct *thread = ¤t->thread; | 575 | struct thread_struct *thread = ¤t->thread; |
576 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | ||
576 | __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu); | 577 | __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu); |
577 | 578 | ||
578 | if (cpu_test_and_set(cpu, cpu_initialized)) { | 579 | if (cpu_test_and_set(cpu, cpu_initialized)) { |
@@ -594,24 +595,16 @@ void __devinit cpu_init(void) | |||
594 | * Initialize the per-CPU GDT with the boot GDT, | 595 | * Initialize the per-CPU GDT with the boot GDT, |
595 | * and set up the GDT descriptor: | 596 | * and set up the GDT descriptor: |
596 | */ | 597 | */ |
597 | memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table, | 598 | memcpy(gdt, cpu_gdt_table, GDT_SIZE); |
598 | GDT_SIZE); | ||
599 | 599 | ||
600 | /* Set up GDT entry for 16bit stack */ | 600 | /* Set up GDT entry for 16bit stack */ |
601 | *(__u64 *)&(per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_ESPFIX_SS]) |= | 601 | *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |= |
602 | ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) | | 602 | ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) | |
603 | ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) | | 603 | ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) | |
604 | (CPU_16BIT_STACK_SIZE - 1); | 604 | (CPU_16BIT_STACK_SIZE - 1); |
605 | 605 | ||
606 | cpu_gdt_descr[cpu].size = GDT_SIZE - 1; | 606 | cpu_gdt_descr[cpu].size = GDT_SIZE - 1; |
607 | cpu_gdt_descr[cpu].address = | 607 | cpu_gdt_descr[cpu].address = (unsigned long)gdt; |
608 | (unsigned long)&per_cpu(cpu_gdt_table, cpu); | ||
609 | |||
610 | /* | ||
611 | * Set up the per-thread TLS descriptor cache: | ||
612 | */ | ||
613 | memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu), | ||
614 | GDT_ENTRY_TLS_ENTRIES * 8); | ||
615 | 608 | ||
616 | load_gdt(&cpu_gdt_descr[cpu]); | 609 | load_gdt(&cpu_gdt_descr[cpu]); |
617 | load_idt(&idt_descr); | 610 | load_idt(&idt_descr); |
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c index 822c8ce9d1f1..caa9f7711343 100644 --- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/proc_fs.h> | 32 | #include <linux/proc_fs.h> |
33 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
34 | #include <linux/compiler.h> | 34 | #include <linux/compiler.h> |
35 | #include <linux/sched.h> /* current */ | ||
35 | #include <asm/io.h> | 36 | #include <asm/io.h> |
36 | #include <asm/delay.h> | 37 | #include <asm/delay.h> |
37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c index aa622d52c6e5..270f2188d68b 100644 --- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/cpufreq.h> | 28 | #include <linux/cpufreq.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/cpumask.h> | 30 | #include <linux/cpumask.h> |
31 | #include <linux/sched.h> /* current / set_cpus_allowed() */ | ||
31 | 32 | ||
32 | #include <asm/processor.h> | 33 | #include <asm/processor.h> |
33 | #include <asm/msr.h> | 34 | #include <asm/msr.h> |
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index 58ca98fdc2ca..2d5c9adba0cd 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/string.h> | 33 | #include <linux/string.h> |
34 | #include <linux/cpumask.h> | 34 | #include <linux/cpumask.h> |
35 | #include <linux/sched.h> /* for current / set_cpus_allowed() */ | ||
35 | 36 | ||
36 | #include <asm/msr.h> | 37 | #include <asm/msr.h> |
37 | #include <asm/io.h> | 38 | #include <asm/io.h> |
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c index c397b6220430..1465974256c9 100644 --- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/cpufreq.h> | 23 | #include <linux/cpufreq.h> |
24 | #include <linux/config.h> | 24 | #include <linux/config.h> |
25 | #include <linux/sched.h> /* current */ | ||
25 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
26 | #include <linux/compiler.h> | 27 | #include <linux/compiler.h> |
27 | 28 | ||
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index 9e0d5f83cb9f..4dc42a189ae5 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Changes: | 4 | * Changes: |
5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) | 5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) |
6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. | ||
6 | */ | 7 | */ |
7 | 8 | ||
8 | #include <linux/init.h> | 9 | #include <linux/init.h> |
@@ -10,6 +11,7 @@ | |||
10 | #include <linux/device.h> | 11 | #include <linux/device.h> |
11 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
12 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
14 | #include <linux/sched.h> | ||
13 | 15 | ||
14 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
15 | #include <asm/smp.h> | 17 | #include <asm/smp.h> |
@@ -28,7 +30,7 @@ struct _cache_table | |||
28 | }; | 30 | }; |
29 | 31 | ||
30 | /* all the cache descriptor types we care about (no TLB or trace cache entries) */ | 32 | /* all the cache descriptor types we care about (no TLB or trace cache entries) */ |
31 | static struct _cache_table cache_table[] __devinitdata = | 33 | static struct _cache_table cache_table[] __cpuinitdata = |
32 | { | 34 | { |
33 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ | 35 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ |
34 | { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ | 36 | { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ |
@@ -117,10 +119,9 @@ struct _cpuid4_info { | |||
117 | cpumask_t shared_cpu_map; | 119 | cpumask_t shared_cpu_map; |
118 | }; | 120 | }; |
119 | 121 | ||
120 | #define MAX_CACHE_LEAVES 4 | ||
121 | static unsigned short num_cache_leaves; | 122 | static unsigned short num_cache_leaves; |
122 | 123 | ||
123 | static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | 124 | static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) |
124 | { | 125 | { |
125 | unsigned int eax, ebx, ecx, edx; | 126 | unsigned int eax, ebx, ecx, edx; |
126 | union _cpuid4_leaf_eax cache_eax; | 127 | union _cpuid4_leaf_eax cache_eax; |
@@ -144,23 +145,18 @@ static int __init find_num_cache_leaves(void) | |||
144 | { | 145 | { |
145 | unsigned int eax, ebx, ecx, edx; | 146 | unsigned int eax, ebx, ecx, edx; |
146 | union _cpuid4_leaf_eax cache_eax; | 147 | union _cpuid4_leaf_eax cache_eax; |
147 | int i; | 148 | int i = -1; |
148 | int retval; | ||
149 | 149 | ||
150 | retval = MAX_CACHE_LEAVES; | 150 | do { |
151 | /* Do cpuid(4) loop to find out num_cache_leaves */ | 151 | ++i; |
152 | for (i = 0; i < MAX_CACHE_LEAVES; i++) { | 152 | /* Do cpuid(4) loop to find out num_cache_leaves */ |
153 | cpuid_count(4, i, &eax, &ebx, &ecx, &edx); | 153 | cpuid_count(4, i, &eax, &ebx, &ecx, &edx); |
154 | cache_eax.full = eax; | 154 | cache_eax.full = eax; |
155 | if (cache_eax.split.type == CACHE_TYPE_NULL) { | 155 | } while (cache_eax.split.type != CACHE_TYPE_NULL); |
156 | retval = i; | 156 | return i; |
157 | break; | ||
158 | } | ||
159 | } | ||
160 | return retval; | ||
161 | } | 157 | } |
162 | 158 | ||
163 | unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | 159 | unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) |
164 | { | 160 | { |
165 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ | 161 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ |
166 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ | 162 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ |
@@ -284,13 +280,7 @@ unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
284 | if ( l3 ) | 280 | if ( l3 ) |
285 | printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); | 281 | printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); |
286 | 282 | ||
287 | /* | 283 | c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); |
288 | * This assumes the L3 cache is shared; it typically lives in | ||
289 | * the northbridge. The L1 caches are included by the L2 | ||
290 | * cache, and so should not be included for the purpose of | ||
291 | * SMP switching weights. | ||
292 | */ | ||
293 | c->x86_cache_size = l2 ? l2 : (l1i+l1d); | ||
294 | } | 284 | } |
295 | 285 | ||
296 | return l2; | 286 | return l2; |
@@ -301,7 +291,7 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS]; | |||
301 | #define CPUID4_INFO_IDX(x,y) (&((cpuid4_info[x])[y])) | 291 | #define CPUID4_INFO_IDX(x,y) (&((cpuid4_info[x])[y])) |
302 | 292 | ||
303 | #ifdef CONFIG_SMP | 293 | #ifdef CONFIG_SMP |
304 | static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 294 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) |
305 | { | 295 | { |
306 | struct _cpuid4_info *this_leaf; | 296 | struct _cpuid4_info *this_leaf; |
307 | unsigned long num_threads_sharing; | 297 | unsigned long num_threads_sharing; |
@@ -334,7 +324,7 @@ static void free_cache_attributes(unsigned int cpu) | |||
334 | cpuid4_info[cpu] = NULL; | 324 | cpuid4_info[cpu] = NULL; |
335 | } | 325 | } |
336 | 326 | ||
337 | static int __devinit detect_cache_attributes(unsigned int cpu) | 327 | static int __cpuinit detect_cache_attributes(unsigned int cpu) |
338 | { | 328 | { |
339 | struct _cpuid4_info *this_leaf; | 329 | struct _cpuid4_info *this_leaf; |
340 | unsigned long j; | 330 | unsigned long j; |
@@ -511,7 +501,7 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu) | |||
511 | free_cache_attributes(cpu); | 501 | free_cache_attributes(cpu); |
512 | } | 502 | } |
513 | 503 | ||
514 | static int __devinit cpuid4_cache_sysfs_init(unsigned int cpu) | 504 | static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) |
515 | { | 505 | { |
516 | 506 | ||
517 | if (num_cache_leaves == 0) | 507 | if (num_cache_leaves == 0) |
@@ -542,7 +532,7 @@ err_out: | |||
542 | } | 532 | } |
543 | 533 | ||
544 | /* Add/Remove cache interface for CPU device */ | 534 | /* Add/Remove cache interface for CPU device */ |
545 | static int __devinit cache_add_dev(struct sys_device * sys_dev) | 535 | static int __cpuinit cache_add_dev(struct sys_device * sys_dev) |
546 | { | 536 | { |
547 | unsigned int cpu = sys_dev->id; | 537 | unsigned int cpu = sys_dev->id; |
548 | unsigned long i, j; | 538 | unsigned long i, j; |
@@ -579,7 +569,7 @@ static int __devinit cache_add_dev(struct sys_device * sys_dev) | |||
579 | return retval; | 569 | return retval; |
580 | } | 570 | } |
581 | 571 | ||
582 | static int __devexit cache_remove_dev(struct sys_device * sys_dev) | 572 | static void __cpuexit cache_remove_dev(struct sys_device * sys_dev) |
583 | { | 573 | { |
584 | unsigned int cpu = sys_dev->id; | 574 | unsigned int cpu = sys_dev->id; |
585 | unsigned long i; | 575 | unsigned long i; |
@@ -588,24 +578,49 @@ static int __devexit cache_remove_dev(struct sys_device * sys_dev) | |||
588 | kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); | 578 | kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); |
589 | kobject_unregister(cache_kobject[cpu]); | 579 | kobject_unregister(cache_kobject[cpu]); |
590 | cpuid4_cache_sysfs_exit(cpu); | 580 | cpuid4_cache_sysfs_exit(cpu); |
591 | return 0; | 581 | return; |
582 | } | ||
583 | |||
584 | static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, | ||
585 | unsigned long action, void *hcpu) | ||
586 | { | ||
587 | unsigned int cpu = (unsigned long)hcpu; | ||
588 | struct sys_device *sys_dev; | ||
589 | |||
590 | sys_dev = get_cpu_sysdev(cpu); | ||
591 | switch (action) { | ||
592 | case CPU_ONLINE: | ||
593 | cache_add_dev(sys_dev); | ||
594 | break; | ||
595 | case CPU_DEAD: | ||
596 | cache_remove_dev(sys_dev); | ||
597 | break; | ||
598 | } | ||
599 | return NOTIFY_OK; | ||
592 | } | 600 | } |
593 | 601 | ||
594 | static struct sysdev_driver cache_sysdev_driver = { | 602 | static struct notifier_block cacheinfo_cpu_notifier = |
595 | .add = cache_add_dev, | 603 | { |
596 | .remove = __devexit_p(cache_remove_dev), | 604 | .notifier_call = cacheinfo_cpu_callback, |
597 | }; | 605 | }; |
598 | 606 | ||
599 | /* Register/Unregister the cpu_cache driver */ | 607 | static int __cpuinit cache_sysfs_init(void) |
600 | static int __devinit cache_register_driver(void) | ||
601 | { | 608 | { |
609 | int i; | ||
610 | |||
602 | if (num_cache_leaves == 0) | 611 | if (num_cache_leaves == 0) |
603 | return 0; | 612 | return 0; |
604 | 613 | ||
605 | return sysdev_driver_register(&cpu_sysdev_class,&cache_sysdev_driver); | 614 | register_cpu_notifier(&cacheinfo_cpu_notifier); |
615 | |||
616 | for_each_online_cpu(i) { | ||
617 | cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE, | ||
618 | (void *)(long)i); | ||
619 | } | ||
620 | |||
621 | return 0; | ||
606 | } | 622 | } |
607 | 623 | ||
608 | device_initcall(cache_register_driver); | 624 | device_initcall(cache_sysfs_init); |
609 | 625 | ||
610 | #endif | 626 | #endif |
611 | |||
diff --git a/arch/i386/kernel/cpu/mcheck/p6.c b/arch/i386/kernel/cpu/mcheck/p6.c index 3c035b8fa3d9..979b18bc95c1 100644 --- a/arch/i386/kernel/cpu/mcheck/p6.c +++ b/arch/i386/kernel/cpu/mcheck/p6.c | |||
@@ -102,11 +102,16 @@ void __devinit intel_p6_mcheck_init(struct cpuinfo_x86 *c) | |||
102 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); | 102 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); |
103 | nr_mce_banks = l & 0xff; | 103 | nr_mce_banks = l & 0xff; |
104 | 104 | ||
105 | /* Don't enable bank 0 on intel P6 cores, it goes bang quickly. */ | 105 | /* |
106 | for (i=1; i<nr_mce_banks; i++) { | 106 | * Following the example in IA-32 SDM Vol 3: |
107 | * - MC0_CTL should not be written | ||
108 | * - Status registers on all banks should be cleared on reset | ||
109 | */ | ||
110 | for (i=1; i<nr_mce_banks; i++) | ||
107 | wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); | 111 | wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); |
112 | |||
113 | for (i=0; i<nr_mce_banks; i++) | ||
108 | wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); | 114 | wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); |
109 | } | ||
110 | 115 | ||
111 | set_in_cr4 (X86_CR4_MCE); | 116 | set_in_cr4 (X86_CR4_MCE); |
112 | printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", | 117 | printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", |
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c index 1923e0aed26a..cf39e205d33c 100644 --- a/arch/i386/kernel/cpu/mtrr/if.c +++ b/arch/i386/kernel/cpu/mtrr/if.c | |||
@@ -149,60 +149,89 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | |||
149 | return -EINVAL; | 149 | return -EINVAL; |
150 | } | 150 | } |
151 | 151 | ||
152 | static int | 152 | static long |
153 | mtrr_ioctl(struct inode *inode, struct file *file, | 153 | mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) |
154 | unsigned int cmd, unsigned long __arg) | ||
155 | { | 154 | { |
156 | int err; | 155 | int err = 0; |
157 | mtrr_type type; | 156 | mtrr_type type; |
158 | struct mtrr_sentry sentry; | 157 | struct mtrr_sentry sentry; |
159 | struct mtrr_gentry gentry; | 158 | struct mtrr_gentry gentry; |
160 | void __user *arg = (void __user *) __arg; | 159 | void __user *arg = (void __user *) __arg; |
161 | 160 | ||
162 | switch (cmd) { | 161 | switch (cmd) { |
162 | case MTRRIOC_ADD_ENTRY: | ||
163 | case MTRRIOC_SET_ENTRY: | ||
164 | case MTRRIOC_DEL_ENTRY: | ||
165 | case MTRRIOC_KILL_ENTRY: | ||
166 | case MTRRIOC_ADD_PAGE_ENTRY: | ||
167 | case MTRRIOC_SET_PAGE_ENTRY: | ||
168 | case MTRRIOC_DEL_PAGE_ENTRY: | ||
169 | case MTRRIOC_KILL_PAGE_ENTRY: | ||
170 | if (copy_from_user(&sentry, arg, sizeof sentry)) | ||
171 | return -EFAULT; | ||
172 | break; | ||
173 | case MTRRIOC_GET_ENTRY: | ||
174 | case MTRRIOC_GET_PAGE_ENTRY: | ||
175 | if (copy_from_user(&gentry, arg, sizeof gentry)) | ||
176 | return -EFAULT; | ||
177 | break; | ||
178 | #ifdef CONFIG_COMPAT | ||
179 | case MTRRIOC32_ADD_ENTRY: | ||
180 | case MTRRIOC32_SET_ENTRY: | ||
181 | case MTRRIOC32_DEL_ENTRY: | ||
182 | case MTRRIOC32_KILL_ENTRY: | ||
183 | case MTRRIOC32_ADD_PAGE_ENTRY: | ||
184 | case MTRRIOC32_SET_PAGE_ENTRY: | ||
185 | case MTRRIOC32_DEL_PAGE_ENTRY: | ||
186 | case MTRRIOC32_KILL_PAGE_ENTRY: { | ||
187 | struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg; | ||
188 | err = get_user(sentry.base, &s32->base); | ||
189 | err |= get_user(sentry.size, &s32->size); | ||
190 | err |= get_user(sentry.type, &s32->type); | ||
191 | if (err) | ||
192 | return err; | ||
193 | break; | ||
194 | } | ||
195 | case MTRRIOC32_GET_ENTRY: | ||
196 | case MTRRIOC32_GET_PAGE_ENTRY: { | ||
197 | struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; | ||
198 | err = get_user(gentry.regnum, &g32->regnum); | ||
199 | err |= get_user(gentry.base, &g32->base); | ||
200 | err |= get_user(gentry.size, &g32->size); | ||
201 | err |= get_user(gentry.type, &g32->type); | ||
202 | if (err) | ||
203 | return err; | ||
204 | break; | ||
205 | } | ||
206 | #endif | ||
207 | } | ||
208 | |||
209 | switch (cmd) { | ||
163 | default: | 210 | default: |
164 | return -ENOTTY; | 211 | return -ENOTTY; |
165 | case MTRRIOC_ADD_ENTRY: | 212 | case MTRRIOC_ADD_ENTRY: |
166 | if (!capable(CAP_SYS_ADMIN)) | 213 | if (!capable(CAP_SYS_ADMIN)) |
167 | return -EPERM; | 214 | return -EPERM; |
168 | if (copy_from_user(&sentry, arg, sizeof sentry)) | ||
169 | return -EFAULT; | ||
170 | err = | 215 | err = |
171 | mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, | 216 | mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, |
172 | file, 0); | 217 | file, 0); |
173 | if (err < 0) | ||
174 | return err; | ||
175 | break; | 218 | break; |
176 | case MTRRIOC_SET_ENTRY: | 219 | case MTRRIOC_SET_ENTRY: |
177 | if (!capable(CAP_SYS_ADMIN)) | 220 | if (!capable(CAP_SYS_ADMIN)) |
178 | return -EPERM; | 221 | return -EPERM; |
179 | if (copy_from_user(&sentry, arg, sizeof sentry)) | ||
180 | return -EFAULT; | ||
181 | err = mtrr_add(sentry.base, sentry.size, sentry.type, 0); | 222 | err = mtrr_add(sentry.base, sentry.size, sentry.type, 0); |
182 | if (err < 0) | ||
183 | return err; | ||
184 | break; | 223 | break; |
185 | case MTRRIOC_DEL_ENTRY: | 224 | case MTRRIOC_DEL_ENTRY: |
186 | if (!capable(CAP_SYS_ADMIN)) | 225 | if (!capable(CAP_SYS_ADMIN)) |
187 | return -EPERM; | 226 | return -EPERM; |
188 | if (copy_from_user(&sentry, arg, sizeof sentry)) | ||
189 | return -EFAULT; | ||
190 | err = mtrr_file_del(sentry.base, sentry.size, file, 0); | 227 | err = mtrr_file_del(sentry.base, sentry.size, file, 0); |
191 | if (err < 0) | ||
192 | return err; | ||
193 | break; | 228 | break; |
194 | case MTRRIOC_KILL_ENTRY: | 229 | case MTRRIOC_KILL_ENTRY: |
195 | if (!capable(CAP_SYS_ADMIN)) | 230 | if (!capable(CAP_SYS_ADMIN)) |
196 | return -EPERM; | 231 | return -EPERM; |
197 | if (copy_from_user(&sentry, arg, sizeof sentry)) | ||
198 | return -EFAULT; | ||
199 | err = mtrr_del(-1, sentry.base, sentry.size); | 232 | err = mtrr_del(-1, sentry.base, sentry.size); |
200 | if (err < 0) | ||
201 | return err; | ||
202 | break; | 233 | break; |
203 | case MTRRIOC_GET_ENTRY: | 234 | case MTRRIOC_GET_ENTRY: |
204 | if (copy_from_user(&gentry, arg, sizeof gentry)) | ||
205 | return -EFAULT; | ||
206 | if (gentry.regnum >= num_var_ranges) | 235 | if (gentry.regnum >= num_var_ranges) |
207 | return -EINVAL; | 236 | return -EINVAL; |
208 | mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); | 237 | mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); |
@@ -217,60 +246,59 @@ mtrr_ioctl(struct inode *inode, struct file *file, | |||
217 | gentry.type = type; | 246 | gentry.type = type; |
218 | } | 247 | } |
219 | 248 | ||
220 | if (copy_to_user(arg, &gentry, sizeof gentry)) | ||
221 | return -EFAULT; | ||
222 | break; | 249 | break; |
223 | case MTRRIOC_ADD_PAGE_ENTRY: | 250 | case MTRRIOC_ADD_PAGE_ENTRY: |
224 | if (!capable(CAP_SYS_ADMIN)) | 251 | if (!capable(CAP_SYS_ADMIN)) |
225 | return -EPERM; | 252 | return -EPERM; |
226 | if (copy_from_user(&sentry, arg, sizeof sentry)) | ||
227 | return -EFAULT; | ||
228 | err = | 253 | err = |
229 | mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, | 254 | mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, |
230 | file, 1); | 255 | file, 1); |
231 | if (err < 0) | ||
232 | return err; | ||
233 | break; | 256 | break; |
234 | case MTRRIOC_SET_PAGE_ENTRY: | 257 | case MTRRIOC_SET_PAGE_ENTRY: |
235 | if (!capable(CAP_SYS_ADMIN)) | 258 | if (!capable(CAP_SYS_ADMIN)) |
236 | return -EPERM; | 259 | return -EPERM; |
237 | if (copy_from_user(&sentry, arg, sizeof sentry)) | ||
238 | return -EFAULT; | ||
239 | err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0); | 260 | err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0); |
240 | if (err < 0) | ||
241 | return err; | ||
242 | break; | 261 | break; |
243 | case MTRRIOC_DEL_PAGE_ENTRY: | 262 | case MTRRIOC_DEL_PAGE_ENTRY: |
244 | if (!capable(CAP_SYS_ADMIN)) | 263 | if (!capable(CAP_SYS_ADMIN)) |
245 | return -EPERM; | 264 | return -EPERM; |
246 | if (copy_from_user(&sentry, arg, sizeof sentry)) | ||
247 | return -EFAULT; | ||
248 | err = mtrr_file_del(sentry.base, sentry.size, file, 1); | 265 | err = mtrr_file_del(sentry.base, sentry.size, file, 1); |
249 | if (err < 0) | ||
250 | return err; | ||
251 | break; | 266 | break; |
252 | case MTRRIOC_KILL_PAGE_ENTRY: | 267 | case MTRRIOC_KILL_PAGE_ENTRY: |
253 | if (!capable(CAP_SYS_ADMIN)) | 268 | if (!capable(CAP_SYS_ADMIN)) |
254 | return -EPERM; | 269 | return -EPERM; |
255 | if (copy_from_user(&sentry, arg, sizeof sentry)) | ||
256 | return -EFAULT; | ||
257 | err = mtrr_del_page(-1, sentry.base, sentry.size); | 270 | err = mtrr_del_page(-1, sentry.base, sentry.size); |
258 | if (err < 0) | ||
259 | return err; | ||
260 | break; | 271 | break; |
261 | case MTRRIOC_GET_PAGE_ENTRY: | 272 | case MTRRIOC_GET_PAGE_ENTRY: |
262 | if (copy_from_user(&gentry, arg, sizeof gentry)) | ||
263 | return -EFAULT; | ||
264 | if (gentry.regnum >= num_var_ranges) | 273 | if (gentry.regnum >= num_var_ranges) |
265 | return -EINVAL; | 274 | return -EINVAL; |
266 | mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); | 275 | mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); |
267 | gentry.type = type; | 276 | gentry.type = type; |
277 | break; | ||
278 | } | ||
279 | |||
280 | if (err) | ||
281 | return err; | ||
268 | 282 | ||
283 | switch(cmd) { | ||
284 | case MTRRIOC_GET_ENTRY: | ||
285 | case MTRRIOC_GET_PAGE_ENTRY: | ||
269 | if (copy_to_user(arg, &gentry, sizeof gentry)) | 286 | if (copy_to_user(arg, &gentry, sizeof gentry)) |
270 | return -EFAULT; | 287 | err = -EFAULT; |
288 | break; | ||
289 | #ifdef CONFIG_COMPAT | ||
290 | case MTRRIOC32_GET_ENTRY: | ||
291 | case MTRRIOC32_GET_PAGE_ENTRY: { | ||
292 | struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; | ||
293 | err = put_user(gentry.base, &g32->base); | ||
294 | err |= put_user(gentry.size, &g32->size); | ||
295 | err |= put_user(gentry.regnum, &g32->regnum); | ||
296 | err |= put_user(gentry.type, &g32->type); | ||
271 | break; | 297 | break; |
272 | } | 298 | } |
273 | return 0; | 299 | #endif |
300 | } | ||
301 | return err; | ||
274 | } | 302 | } |
275 | 303 | ||
276 | static int | 304 | static int |
@@ -310,7 +338,8 @@ static struct file_operations mtrr_fops = { | |||
310 | .read = seq_read, | 338 | .read = seq_read, |
311 | .llseek = seq_lseek, | 339 | .llseek = seq_lseek, |
312 | .write = mtrr_write, | 340 | .write = mtrr_write, |
313 | .ioctl = mtrr_ioctl, | 341 | .unlocked_ioctl = mtrr_ioctl, |
342 | .compat_ioctl = mtrr_ioctl, | ||
314 | .release = mtrr_close, | 343 | .release = mtrr_close, |
315 | }; | 344 | }; |
316 | 345 | ||
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c index 8bd77d948a84..41b871ecf4b3 100644 --- a/arch/i386/kernel/cpu/proc.c +++ b/arch/i386/kernel/cpu/proc.c | |||
@@ -44,7 +44,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
44 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 44 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
45 | 45 | ||
46 | /* Intel-defined (#2) */ | 46 | /* Intel-defined (#2) */ |
47 | "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est", | 47 | "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est", |
48 | "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL, | 48 | "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL, |
49 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 49 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
50 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 50 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c index 4647db4ad6de..13bae799e626 100644 --- a/arch/i386/kernel/cpuid.c +++ b/arch/i386/kernel/cpuid.c | |||
@@ -163,7 +163,7 @@ static int cpuid_class_device_create(int i) | |||
163 | int err = 0; | 163 | int err = 0; |
164 | struct class_device *class_err; | 164 | struct class_device *class_err; |
165 | 165 | ||
166 | class_err = class_device_create(cpuid_class, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i); | 166 | class_err = class_device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i); |
167 | if (IS_ERR(class_err)) | 167 | if (IS_ERR(class_err)) |
168 | err = PTR_ERR(class_err); | 168 | err = PTR_ERR(class_err); |
169 | return err; | 169 | return err; |
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c index 0248e084017c..af809ccf5fbe 100644 --- a/arch/i386/kernel/crash.c +++ b/arch/i386/kernel/crash.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <asm/hardirq.h> | 21 | #include <asm/hardirq.h> |
22 | #include <asm/nmi.h> | 22 | #include <asm/nmi.h> |
23 | #include <asm/hw_irq.h> | 23 | #include <asm/hw_irq.h> |
24 | #include <asm/apic.h> | ||
25 | #include <mach_ipi.h> | 24 | #include <mach_ipi.h> |
26 | 25 | ||
27 | 26 | ||
@@ -148,7 +147,6 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu) | |||
148 | regs = &fixed_regs; | 147 | regs = &fixed_regs; |
149 | } | 148 | } |
150 | crash_save_this_cpu(regs, cpu); | 149 | crash_save_this_cpu(regs, cpu); |
151 | disable_local_APIC(); | ||
152 | atomic_dec(&waiting_for_crash_ipi); | 150 | atomic_dec(&waiting_for_crash_ipi); |
153 | /* Assume hlt works */ | 151 | /* Assume hlt works */ |
154 | halt(); | 152 | halt(); |
@@ -188,7 +186,6 @@ static void nmi_shootdown_cpus(void) | |||
188 | } | 186 | } |
189 | 187 | ||
190 | /* Leave the nmi callback set */ | 188 | /* Leave the nmi callback set */ |
191 | disable_local_APIC(); | ||
192 | } | 189 | } |
193 | #else | 190 | #else |
194 | static void nmi_shootdown_cpus(void) | 191 | static void nmi_shootdown_cpus(void) |
@@ -213,9 +210,5 @@ void machine_crash_shutdown(struct pt_regs *regs) | |||
213 | /* Make a note of crashing cpu. Will be used in NMI callback.*/ | 210 | /* Make a note of crashing cpu. Will be used in NMI callback.*/ |
214 | crashing_cpu = smp_processor_id(); | 211 | crashing_cpu = smp_processor_id(); |
215 | nmi_shootdown_cpus(); | 212 | nmi_shootdown_cpus(); |
216 | lapic_shutdown(); | ||
217 | #if defined(CONFIG_X86_IO_APIC) | ||
218 | disable_IO_APIC(); | ||
219 | #endif | ||
220 | crash_save_self(regs); | 213 | crash_save_self(regs); |
221 | } | 214 | } |
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c index 323ef8ab3244..d86f24909284 100644 --- a/arch/i386/kernel/i8259.c +++ b/arch/i386/kernel/i8259.c | |||
@@ -435,4 +435,8 @@ void __init init_IRQ(void) | |||
435 | setup_irq(FPU_IRQ, &fpu_irq); | 435 | setup_irq(FPU_IRQ, &fpu_irq); |
436 | 436 | ||
437 | irq_ctx_init(smp_processor_id()); | 437 | irq_ctx_init(smp_processor_id()); |
438 | |||
439 | #ifdef CONFIG_X86_LOCAL_APIC | ||
440 | APIC_init(); | ||
441 | #endif | ||
438 | } | 442 | } |
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index fb3991e8229e..5a77c52b20a9 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c | |||
@@ -46,6 +46,9 @@ | |||
46 | int (*ioapic_renumber_irq)(int ioapic, int irq); | 46 | int (*ioapic_renumber_irq)(int ioapic, int irq); |
47 | atomic_t irq_mis_count; | 47 | atomic_t irq_mis_count; |
48 | 48 | ||
49 | /* Where if anywhere is the i8259 connect in external int mode */ | ||
50 | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; | ||
51 | |||
49 | static DEFINE_SPINLOCK(ioapic_lock); | 52 | static DEFINE_SPINLOCK(ioapic_lock); |
50 | 53 | ||
51 | /* | 54 | /* |
@@ -738,7 +741,7 @@ static int find_irq_entry(int apic, int pin, int type) | |||
738 | /* | 741 | /* |
739 | * Find the pin to which IRQ[irq] (ISA) is connected | 742 | * Find the pin to which IRQ[irq] (ISA) is connected |
740 | */ | 743 | */ |
741 | static int find_isa_irq_pin(int irq, int type) | 744 | static int __init find_isa_irq_pin(int irq, int type) |
742 | { | 745 | { |
743 | int i; | 746 | int i; |
744 | 747 | ||
@@ -758,6 +761,33 @@ static int find_isa_irq_pin(int irq, int type) | |||
758 | return -1; | 761 | return -1; |
759 | } | 762 | } |
760 | 763 | ||
764 | static int __init find_isa_irq_apic(int irq, int type) | ||
765 | { | ||
766 | int i; | ||
767 | |||
768 | for (i = 0; i < mp_irq_entries; i++) { | ||
769 | int lbus = mp_irqs[i].mpc_srcbus; | ||
770 | |||
771 | if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || | ||
772 | mp_bus_id_to_type[lbus] == MP_BUS_EISA || | ||
773 | mp_bus_id_to_type[lbus] == MP_BUS_MCA || | ||
774 | mp_bus_id_to_type[lbus] == MP_BUS_NEC98 | ||
775 | ) && | ||
776 | (mp_irqs[i].mpc_irqtype == type) && | ||
777 | (mp_irqs[i].mpc_srcbusirq == irq)) | ||
778 | break; | ||
779 | } | ||
780 | if (i < mp_irq_entries) { | ||
781 | int apic; | ||
782 | for(apic = 0; apic < nr_ioapics; apic++) { | ||
783 | if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic) | ||
784 | return apic; | ||
785 | } | ||
786 | } | ||
787 | |||
788 | return -1; | ||
789 | } | ||
790 | |||
761 | /* | 791 | /* |
762 | * Find a specific PCI IRQ entry. | 792 | * Find a specific PCI IRQ entry. |
763 | * Not an __init, possibly needed by modules | 793 | * Not an __init, possibly needed by modules |
@@ -1253,7 +1283,7 @@ static void __init setup_IO_APIC_irqs(void) | |||
1253 | /* | 1283 | /* |
1254 | * Set up the 8259A-master output pin: | 1284 | * Set up the 8259A-master output pin: |
1255 | */ | 1285 | */ |
1256 | static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector) | 1286 | static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector) |
1257 | { | 1287 | { |
1258 | struct IO_APIC_route_entry entry; | 1288 | struct IO_APIC_route_entry entry; |
1259 | unsigned long flags; | 1289 | unsigned long flags; |
@@ -1287,8 +1317,8 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector) | |||
1287 | * Add it to the IO-APIC irq-routing table: | 1317 | * Add it to the IO-APIC irq-routing table: |
1288 | */ | 1318 | */ |
1289 | spin_lock_irqsave(&ioapic_lock, flags); | 1319 | spin_lock_irqsave(&ioapic_lock, flags); |
1290 | io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1)); | 1320 | io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); |
1291 | io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0)); | 1321 | io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); |
1292 | spin_unlock_irqrestore(&ioapic_lock, flags); | 1322 | spin_unlock_irqrestore(&ioapic_lock, flags); |
1293 | 1323 | ||
1294 | enable_8259A_irq(0); | 1324 | enable_8259A_irq(0); |
@@ -1595,7 +1625,8 @@ void /*__init*/ print_PIC(void) | |||
1595 | static void __init enable_IO_APIC(void) | 1625 | static void __init enable_IO_APIC(void) |
1596 | { | 1626 | { |
1597 | union IO_APIC_reg_01 reg_01; | 1627 | union IO_APIC_reg_01 reg_01; |
1598 | int i; | 1628 | int i8259_apic, i8259_pin; |
1629 | int i, apic; | ||
1599 | unsigned long flags; | 1630 | unsigned long flags; |
1600 | 1631 | ||
1601 | for (i = 0; i < PIN_MAP_SIZE; i++) { | 1632 | for (i = 0; i < PIN_MAP_SIZE; i++) { |
@@ -1609,11 +1640,52 @@ static void __init enable_IO_APIC(void) | |||
1609 | /* | 1640 | /* |
1610 | * The number of IO-APIC IRQ registers (== #pins): | 1641 | * The number of IO-APIC IRQ registers (== #pins): |
1611 | */ | 1642 | */ |
1612 | for (i = 0; i < nr_ioapics; i++) { | 1643 | for (apic = 0; apic < nr_ioapics; apic++) { |
1613 | spin_lock_irqsave(&ioapic_lock, flags); | 1644 | spin_lock_irqsave(&ioapic_lock, flags); |
1614 | reg_01.raw = io_apic_read(i, 1); | 1645 | reg_01.raw = io_apic_read(apic, 1); |
1615 | spin_unlock_irqrestore(&ioapic_lock, flags); | 1646 | spin_unlock_irqrestore(&ioapic_lock, flags); |
1616 | nr_ioapic_registers[i] = reg_01.bits.entries+1; | 1647 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; |
1648 | } | ||
1649 | for(apic = 0; apic < nr_ioapics; apic++) { | ||
1650 | int pin; | ||
1651 | /* See if any of the pins is in ExtINT mode */ | ||
1652 | for(pin = 0; pin < nr_ioapic_registers[i]; pin++) { | ||
1653 | struct IO_APIC_route_entry entry; | ||
1654 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1655 | *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); | ||
1656 | *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin); | ||
1657 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1658 | |||
1659 | |||
1660 | /* If the interrupt line is enabled and in ExtInt mode | ||
1661 | * I have found the pin where the i8259 is connected. | ||
1662 | */ | ||
1663 | if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { | ||
1664 | ioapic_i8259.apic = apic; | ||
1665 | ioapic_i8259.pin = pin; | ||
1666 | goto found_i8259; | ||
1667 | } | ||
1668 | } | ||
1669 | } | ||
1670 | found_i8259: | ||
1671 | /* Look to see what if the MP table has reported the ExtINT */ | ||
1672 | /* If we could not find the appropriate pin by looking at the ioapic | ||
1673 | * the i8259 probably is not connected the ioapic but give the | ||
1674 | * mptable a chance anyway. | ||
1675 | */ | ||
1676 | i8259_pin = find_isa_irq_pin(0, mp_ExtINT); | ||
1677 | i8259_apic = find_isa_irq_apic(0, mp_ExtINT); | ||
1678 | /* Trust the MP table if nothing is setup in the hardware */ | ||
1679 | if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { | ||
1680 | printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); | ||
1681 | ioapic_i8259.pin = i8259_pin; | ||
1682 | ioapic_i8259.apic = i8259_apic; | ||
1683 | } | ||
1684 | /* Complain if the MP table and the hardware disagree */ | ||
1685 | if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && | ||
1686 | (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) | ||
1687 | { | ||
1688 | printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); | ||
1617 | } | 1689 | } |
1618 | 1690 | ||
1619 | /* | 1691 | /* |
@@ -1627,7 +1699,6 @@ static void __init enable_IO_APIC(void) | |||
1627 | */ | 1699 | */ |
1628 | void disable_IO_APIC(void) | 1700 | void disable_IO_APIC(void) |
1629 | { | 1701 | { |
1630 | int pin; | ||
1631 | /* | 1702 | /* |
1632 | * Clear the IO-APIC before rebooting: | 1703 | * Clear the IO-APIC before rebooting: |
1633 | */ | 1704 | */ |
@@ -1638,8 +1709,7 @@ void disable_IO_APIC(void) | |||
1638 | * Put that IOAPIC in virtual wire mode | 1709 | * Put that IOAPIC in virtual wire mode |
1639 | * so legacy interrupts can be delivered. | 1710 | * so legacy interrupts can be delivered. |
1640 | */ | 1711 | */ |
1641 | pin = find_isa_irq_pin(0, mp_ExtINT); | 1712 | if (ioapic_i8259.pin != -1) { |
1642 | if (pin != -1) { | ||
1643 | struct IO_APIC_route_entry entry; | 1713 | struct IO_APIC_route_entry entry; |
1644 | unsigned long flags; | 1714 | unsigned long flags; |
1645 | 1715 | ||
@@ -1650,7 +1720,7 @@ void disable_IO_APIC(void) | |||
1650 | entry.polarity = 0; /* High */ | 1720 | entry.polarity = 0; /* High */ |
1651 | entry.delivery_status = 0; | 1721 | entry.delivery_status = 0; |
1652 | entry.dest_mode = 0; /* Physical */ | 1722 | entry.dest_mode = 0; /* Physical */ |
1653 | entry.delivery_mode = 7; /* ExtInt */ | 1723 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ |
1654 | entry.vector = 0; | 1724 | entry.vector = 0; |
1655 | entry.dest.physical.physical_dest = 0; | 1725 | entry.dest.physical.physical_dest = 0; |
1656 | 1726 | ||
@@ -1659,11 +1729,13 @@ void disable_IO_APIC(void) | |||
1659 | * Add it to the IO-APIC irq-routing table: | 1729 | * Add it to the IO-APIC irq-routing table: |
1660 | */ | 1730 | */ |
1661 | spin_lock_irqsave(&ioapic_lock, flags); | 1731 | spin_lock_irqsave(&ioapic_lock, flags); |
1662 | io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1)); | 1732 | io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin, |
1663 | io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0)); | 1733 | *(((int *)&entry)+1)); |
1734 | io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin, | ||
1735 | *(((int *)&entry)+0)); | ||
1664 | spin_unlock_irqrestore(&ioapic_lock, flags); | 1736 | spin_unlock_irqrestore(&ioapic_lock, flags); |
1665 | } | 1737 | } |
1666 | disconnect_bsp_APIC(pin != -1); | 1738 | disconnect_bsp_APIC(ioapic_i8259.pin != -1); |
1667 | } | 1739 | } |
1668 | 1740 | ||
1669 | /* | 1741 | /* |
@@ -2113,20 +2185,21 @@ static void setup_nmi (void) | |||
2113 | */ | 2185 | */ |
2114 | static inline void unlock_ExtINT_logic(void) | 2186 | static inline void unlock_ExtINT_logic(void) |
2115 | { | 2187 | { |
2116 | int pin, i; | 2188 | int apic, pin, i; |
2117 | struct IO_APIC_route_entry entry0, entry1; | 2189 | struct IO_APIC_route_entry entry0, entry1; |
2118 | unsigned char save_control, save_freq_select; | 2190 | unsigned char save_control, save_freq_select; |
2119 | unsigned long flags; | 2191 | unsigned long flags; |
2120 | 2192 | ||
2121 | pin = find_isa_irq_pin(8, mp_INT); | 2193 | pin = find_isa_irq_pin(8, mp_INT); |
2194 | apic = find_isa_irq_apic(8, mp_INT); | ||
2122 | if (pin == -1) | 2195 | if (pin == -1) |
2123 | return; | 2196 | return; |
2124 | 2197 | ||
2125 | spin_lock_irqsave(&ioapic_lock, flags); | 2198 | spin_lock_irqsave(&ioapic_lock, flags); |
2126 | *(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin); | 2199 | *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin); |
2127 | *(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin); | 2200 | *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin); |
2128 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2201 | spin_unlock_irqrestore(&ioapic_lock, flags); |
2129 | clear_IO_APIC_pin(0, pin); | 2202 | clear_IO_APIC_pin(apic, pin); |
2130 | 2203 | ||
2131 | memset(&entry1, 0, sizeof(entry1)); | 2204 | memset(&entry1, 0, sizeof(entry1)); |
2132 | 2205 | ||
@@ -2139,8 +2212,8 @@ static inline void unlock_ExtINT_logic(void) | |||
2139 | entry1.vector = 0; | 2212 | entry1.vector = 0; |
2140 | 2213 | ||
2141 | spin_lock_irqsave(&ioapic_lock, flags); | 2214 | spin_lock_irqsave(&ioapic_lock, flags); |
2142 | io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1)); | 2215 | io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1)); |
2143 | io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0)); | 2216 | io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0)); |
2144 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2217 | spin_unlock_irqrestore(&ioapic_lock, flags); |
2145 | 2218 | ||
2146 | save_control = CMOS_READ(RTC_CONTROL); | 2219 | save_control = CMOS_READ(RTC_CONTROL); |
@@ -2158,11 +2231,11 @@ static inline void unlock_ExtINT_logic(void) | |||
2158 | 2231 | ||
2159 | CMOS_WRITE(save_control, RTC_CONTROL); | 2232 | CMOS_WRITE(save_control, RTC_CONTROL); |
2160 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); | 2233 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); |
2161 | clear_IO_APIC_pin(0, pin); | 2234 | clear_IO_APIC_pin(apic, pin); |
2162 | 2235 | ||
2163 | spin_lock_irqsave(&ioapic_lock, flags); | 2236 | spin_lock_irqsave(&ioapic_lock, flags); |
2164 | io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1)); | 2237 | io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1)); |
2165 | io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0)); | 2238 | io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0)); |
2166 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2239 | spin_unlock_irqrestore(&ioapic_lock, flags); |
2167 | } | 2240 | } |
2168 | 2241 | ||
@@ -2174,7 +2247,7 @@ static inline void unlock_ExtINT_logic(void) | |||
2174 | */ | 2247 | */ |
2175 | static inline void check_timer(void) | 2248 | static inline void check_timer(void) |
2176 | { | 2249 | { |
2177 | int pin1, pin2; | 2250 | int apic1, pin1, apic2, pin2; |
2178 | int vector; | 2251 | int vector; |
2179 | 2252 | ||
2180 | /* | 2253 | /* |
@@ -2196,10 +2269,13 @@ static inline void check_timer(void) | |||
2196 | timer_ack = 1; | 2269 | timer_ack = 1; |
2197 | enable_8259A_irq(0); | 2270 | enable_8259A_irq(0); |
2198 | 2271 | ||
2199 | pin1 = find_isa_irq_pin(0, mp_INT); | 2272 | pin1 = find_isa_irq_pin(0, mp_INT); |
2200 | pin2 = find_isa_irq_pin(0, mp_ExtINT); | 2273 | apic1 = find_isa_irq_apic(0, mp_INT); |
2274 | pin2 = ioapic_i8259.pin; | ||
2275 | apic2 = ioapic_i8259.apic; | ||
2201 | 2276 | ||
2202 | printk(KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2); | 2277 | printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", |
2278 | vector, apic1, pin1, apic2, pin2); | ||
2203 | 2279 | ||
2204 | if (pin1 != -1) { | 2280 | if (pin1 != -1) { |
2205 | /* | 2281 | /* |
@@ -2216,8 +2292,9 @@ static inline void check_timer(void) | |||
2216 | clear_IO_APIC_pin(0, pin1); | 2292 | clear_IO_APIC_pin(0, pin1); |
2217 | return; | 2293 | return; |
2218 | } | 2294 | } |
2219 | clear_IO_APIC_pin(0, pin1); | 2295 | clear_IO_APIC_pin(apic1, pin1); |
2220 | printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n"); | 2296 | printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to " |
2297 | "IO-APIC\n"); | ||
2221 | } | 2298 | } |
2222 | 2299 | ||
2223 | printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... "); | 2300 | printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... "); |
@@ -2226,13 +2303,13 @@ static inline void check_timer(void) | |||
2226 | /* | 2303 | /* |
2227 | * legacy devices should be connected to IO APIC #0 | 2304 | * legacy devices should be connected to IO APIC #0 |
2228 | */ | 2305 | */ |
2229 | setup_ExtINT_IRQ0_pin(pin2, vector); | 2306 | setup_ExtINT_IRQ0_pin(apic2, pin2, vector); |
2230 | if (timer_irq_works()) { | 2307 | if (timer_irq_works()) { |
2231 | printk("works.\n"); | 2308 | printk("works.\n"); |
2232 | if (pin1 != -1) | 2309 | if (pin1 != -1) |
2233 | replace_pin_at_irq(0, 0, pin1, 0, pin2); | 2310 | replace_pin_at_irq(0, apic1, pin1, apic2, pin2); |
2234 | else | 2311 | else |
2235 | add_pin_to_irq(0, 0, pin2); | 2312 | add_pin_to_irq(0, apic2, pin2); |
2236 | if (nmi_watchdog == NMI_IO_APIC) { | 2313 | if (nmi_watchdog == NMI_IO_APIC) { |
2237 | setup_nmi(); | 2314 | setup_nmi(); |
2238 | } | 2315 | } |
@@ -2241,7 +2318,7 @@ static inline void check_timer(void) | |||
2241 | /* | 2318 | /* |
2242 | * Cleanup, just in case ... | 2319 | * Cleanup, just in case ... |
2243 | */ | 2320 | */ |
2244 | clear_IO_APIC_pin(0, pin2); | 2321 | clear_IO_APIC_pin(apic2, pin2); |
2245 | } | 2322 | } |
2246 | printk(" failed.\n"); | 2323 | printk(" failed.\n"); |
2247 | 2324 | ||
@@ -2310,11 +2387,15 @@ void __init setup_IO_APIC(void) | |||
2310 | sync_Arb_IDs(); | 2387 | sync_Arb_IDs(); |
2311 | setup_IO_APIC_irqs(); | 2388 | setup_IO_APIC_irqs(); |
2312 | init_IO_APIC_traps(); | 2389 | init_IO_APIC_traps(); |
2313 | check_timer(); | ||
2314 | if (!acpi_ioapic) | 2390 | if (!acpi_ioapic) |
2315 | print_IO_APIC(); | 2391 | print_IO_APIC(); |
2316 | } | 2392 | } |
2317 | 2393 | ||
2394 | void __init IO_APIC_late_time_init(void) | ||
2395 | { | ||
2396 | check_timer(); | ||
2397 | } | ||
2398 | |||
2318 | /* | 2399 | /* |
2319 | * Called after all the initialization is done. If we didnt find any | 2400 | * Called after all the initialization is done. If we didnt find any |
2320 | * APIC bugs then we can allow the modify fast path | 2401 | * APIC bugs then we can allow the modify fast path |
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c index ce66dcc26d90..1a201a932865 100644 --- a/arch/i386/kernel/irq.c +++ b/arch/i386/kernel/irq.c | |||
@@ -218,7 +218,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
218 | 218 | ||
219 | if (i == 0) { | 219 | if (i == 0) { |
220 | seq_printf(p, " "); | 220 | seq_printf(p, " "); |
221 | for_each_cpu(j) | 221 | for_each_online_cpu(j) |
222 | seq_printf(p, "CPU%d ",j); | 222 | seq_printf(p, "CPU%d ",j); |
223 | seq_putc(p, '\n'); | 223 | seq_putc(p, '\n'); |
224 | } | 224 | } |
@@ -232,7 +232,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
232 | #ifndef CONFIG_SMP | 232 | #ifndef CONFIG_SMP |
233 | seq_printf(p, "%10u ", kstat_irqs(i)); | 233 | seq_printf(p, "%10u ", kstat_irqs(i)); |
234 | #else | 234 | #else |
235 | for_each_cpu(j) | 235 | for_each_online_cpu(j) |
236 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | 236 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
237 | #endif | 237 | #endif |
238 | seq_printf(p, " %14s", irq_desc[i].handler->typename); | 238 | seq_printf(p, " %14s", irq_desc[i].handler->typename); |
@@ -246,12 +246,12 @@ skip: | |||
246 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 246 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
247 | } else if (i == NR_IRQS) { | 247 | } else if (i == NR_IRQS) { |
248 | seq_printf(p, "NMI: "); | 248 | seq_printf(p, "NMI: "); |
249 | for_each_cpu(j) | 249 | for_each_online_cpu(j) |
250 | seq_printf(p, "%10u ", nmi_count(j)); | 250 | seq_printf(p, "%10u ", nmi_count(j)); |
251 | seq_putc(p, '\n'); | 251 | seq_putc(p, '\n'); |
252 | #ifdef CONFIG_X86_LOCAL_APIC | 252 | #ifdef CONFIG_X86_LOCAL_APIC |
253 | seq_printf(p, "LOC: "); | 253 | seq_printf(p, "LOC: "); |
254 | for_each_cpu(j) | 254 | for_each_online_cpu(j) |
255 | seq_printf(p, "%10u ", | 255 | seq_printf(p, "%10u ", |
256 | per_cpu(irq_stat,j).apic_timer_irqs); | 256 | per_cpu(irq_stat,j).apic_timer_irqs); |
257 | seq_putc(p, '\n'); | 257 | seq_putc(p, '\n'); |
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c index 27aabfceb67e..8f767d9aa45d 100644 --- a/arch/i386/kernel/mpparse.c +++ b/arch/i386/kernel/mpparse.c | |||
@@ -69,7 +69,7 @@ unsigned int def_to_bigsmp = 0; | |||
69 | /* Processor that is doing the boot up */ | 69 | /* Processor that is doing the boot up */ |
70 | unsigned int boot_cpu_physical_apicid = -1U; | 70 | unsigned int boot_cpu_physical_apicid = -1U; |
71 | /* Internal processor count */ | 71 | /* Internal processor count */ |
72 | static unsigned int __initdata num_processors; | 72 | static unsigned int __devinitdata num_processors; |
73 | 73 | ||
74 | /* Bitmask of physically existing CPUs */ | 74 | /* Bitmask of physically existing CPUs */ |
75 | physid_mask_t phys_cpu_present_map; | 75 | physid_mask_t phys_cpu_present_map; |
@@ -119,7 +119,7 @@ static int MP_valid_apicid(int apicid, int version) | |||
119 | } | 119 | } |
120 | #endif | 120 | #endif |
121 | 121 | ||
122 | static void __init MP_processor_info (struct mpc_config_processor *m) | 122 | static void __devinit MP_processor_info (struct mpc_config_processor *m) |
123 | { | 123 | { |
124 | int ver, apicid; | 124 | int ver, apicid; |
125 | physid_mask_t phys_cpu; | 125 | physid_mask_t phys_cpu; |
@@ -182,17 +182,6 @@ static void __init MP_processor_info (struct mpc_config_processor *m) | |||
182 | boot_cpu_physical_apicid = m->mpc_apicid; | 182 | boot_cpu_physical_apicid = m->mpc_apicid; |
183 | } | 183 | } |
184 | 184 | ||
185 | if (num_processors >= NR_CPUS) { | ||
186 | printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." | ||
187 | " Processor ignored.\n", NR_CPUS); | ||
188 | return; | ||
189 | } | ||
190 | |||
191 | if (num_processors >= maxcpus) { | ||
192 | printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." | ||
193 | " Processor ignored.\n", maxcpus); | ||
194 | return; | ||
195 | } | ||
196 | ver = m->mpc_apicver; | 185 | ver = m->mpc_apicver; |
197 | 186 | ||
198 | if (!MP_valid_apicid(apicid, ver)) { | 187 | if (!MP_valid_apicid(apicid, ver)) { |
@@ -201,11 +190,6 @@ static void __init MP_processor_info (struct mpc_config_processor *m) | |||
201 | return; | 190 | return; |
202 | } | 191 | } |
203 | 192 | ||
204 | cpu_set(num_processors, cpu_possible_map); | ||
205 | num_processors++; | ||
206 | phys_cpu = apicid_to_cpu_present(apicid); | ||
207 | physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu); | ||
208 | |||
209 | /* | 193 | /* |
210 | * Validate version | 194 | * Validate version |
211 | */ | 195 | */ |
@@ -216,6 +200,25 @@ static void __init MP_processor_info (struct mpc_config_processor *m) | |||
216 | ver = 0x10; | 200 | ver = 0x10; |
217 | } | 201 | } |
218 | apic_version[m->mpc_apicid] = ver; | 202 | apic_version[m->mpc_apicid] = ver; |
203 | |||
204 | phys_cpu = apicid_to_cpu_present(apicid); | ||
205 | physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu); | ||
206 | |||
207 | if (num_processors >= NR_CPUS) { | ||
208 | printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." | ||
209 | " Processor ignored.\n", NR_CPUS); | ||
210 | return; | ||
211 | } | ||
212 | |||
213 | if (num_processors >= maxcpus) { | ||
214 | printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." | ||
215 | " Processor ignored.\n", maxcpus); | ||
216 | return; | ||
217 | } | ||
218 | |||
219 | cpu_set(num_processors, cpu_possible_map); | ||
220 | num_processors++; | ||
221 | |||
219 | if ((num_processors > 8) && | 222 | if ((num_processors > 8) && |
220 | APIC_XAPIC(ver) && | 223 | APIC_XAPIC(ver) && |
221 | (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) | 224 | (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) |
@@ -834,7 +837,7 @@ void __init mp_register_lapic_address ( | |||
834 | } | 837 | } |
835 | 838 | ||
836 | 839 | ||
837 | void __init mp_register_lapic ( | 840 | void __devinit mp_register_lapic ( |
838 | u8 id, | 841 | u8 id, |
839 | u8 enabled) | 842 | u8 enabled) |
840 | { | 843 | { |
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c index 03100d6fc5d6..44470fea4309 100644 --- a/arch/i386/kernel/msr.c +++ b/arch/i386/kernel/msr.c | |||
@@ -246,7 +246,7 @@ static int msr_class_device_create(int i) | |||
246 | int err = 0; | 246 | int err = 0; |
247 | struct class_device *class_err; | 247 | struct class_device *class_err; |
248 | 248 | ||
249 | class_err = class_device_create(msr_class, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i); | 249 | class_err = class_device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i); |
250 | if (IS_ERR(class_err)) | 250 | if (IS_ERR(class_err)) |
251 | err = PTR_ERR(class_err); | 251 | err = PTR_ERR(class_err); |
252 | return err; | 252 | return err; |
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index 72515b8a1b12..d661703ac1cb 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c | |||
@@ -100,16 +100,44 @@ int nmi_active; | |||
100 | (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ | 100 | (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ |
101 | P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) | 101 | P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) |
102 | 102 | ||
103 | #ifdef CONFIG_SMP | ||
104 | /* The performance counters used by NMI_LOCAL_APIC don't trigger when | ||
105 | * the CPU is idle. To make sure the NMI watchdog really ticks on all | ||
106 | * CPUs during the test make them busy. | ||
107 | */ | ||
108 | static __init void nmi_cpu_busy(void *data) | ||
109 | { | ||
110 | volatile int *endflag = data; | ||
111 | local_irq_enable(); | ||
112 | /* Intentionally don't use cpu_relax here. This is | ||
113 | to make sure that the performance counter really ticks, | ||
114 | even if there is a simulator or similar that catches the | ||
115 | pause instruction. On a real HT machine this is fine because | ||
116 | all other CPUs are busy with "useless" delay loops and don't | ||
117 | care if they get somewhat less cycles. */ | ||
118 | while (*endflag == 0) | ||
119 | barrier(); | ||
120 | } | ||
121 | #endif | ||
122 | |||
103 | static int __init check_nmi_watchdog(void) | 123 | static int __init check_nmi_watchdog(void) |
104 | { | 124 | { |
105 | unsigned int prev_nmi_count[NR_CPUS]; | 125 | volatile int endflag = 0; |
126 | unsigned int *prev_nmi_count; | ||
106 | int cpu; | 127 | int cpu; |
107 | 128 | ||
108 | if (nmi_watchdog == NMI_NONE) | 129 | if (nmi_watchdog == NMI_NONE) |
109 | return 0; | 130 | return 0; |
110 | 131 | ||
132 | prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL); | ||
133 | if (!prev_nmi_count) | ||
134 | return -1; | ||
135 | |||
111 | printk(KERN_INFO "Testing NMI watchdog ... "); | 136 | printk(KERN_INFO "Testing NMI watchdog ... "); |
112 | 137 | ||
138 | if (nmi_watchdog == NMI_LOCAL_APIC) | ||
139 | smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); | ||
140 | |||
113 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 141 | for (cpu = 0; cpu < NR_CPUS; cpu++) |
114 | prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; | 142 | prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; |
115 | local_irq_enable(); | 143 | local_irq_enable(); |
@@ -123,12 +151,18 @@ static int __init check_nmi_watchdog(void) | |||
123 | continue; | 151 | continue; |
124 | #endif | 152 | #endif |
125 | if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { | 153 | if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { |
126 | printk("CPU#%d: NMI appears to be stuck!\n", cpu); | 154 | endflag = 1; |
155 | printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", | ||
156 | cpu, | ||
157 | prev_nmi_count[cpu], | ||
158 | nmi_count(cpu)); | ||
127 | nmi_active = 0; | 159 | nmi_active = 0; |
128 | lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG; | 160 | lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG; |
161 | kfree(prev_nmi_count); | ||
129 | return -1; | 162 | return -1; |
130 | } | 163 | } |
131 | } | 164 | } |
165 | endflag = 1; | ||
132 | printk("OK.\n"); | 166 | printk("OK.\n"); |
133 | 167 | ||
134 | /* now that we know it works we can reduce NMI frequency to | 168 | /* now that we know it works we can reduce NMI frequency to |
@@ -136,6 +170,7 @@ static int __init check_nmi_watchdog(void) | |||
136 | if (nmi_watchdog == NMI_LOCAL_APIC) | 170 | if (nmi_watchdog == NMI_LOCAL_APIC) |
137 | nmi_hz = 1; | 171 | nmi_hz = 1; |
138 | 172 | ||
173 | kfree(prev_nmi_count); | ||
139 | return 0; | 174 | return 0; |
140 | } | 175 | } |
141 | /* This needs to happen later in boot so counters are working */ | 176 | /* This needs to happen later in boot so counters are working */ |
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c index 7b6368bf8974..efd11f09c996 100644 --- a/arch/i386/kernel/ptrace.c +++ b/arch/i386/kernel/ptrace.c | |||
@@ -354,7 +354,7 @@ ptrace_set_thread_area(struct task_struct *child, | |||
354 | return 0; | 354 | return 0; |
355 | } | 355 | } |
356 | 356 | ||
357 | asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | 357 | asmlinkage long sys_ptrace(long request, long pid, long addr, long data) |
358 | { | 358 | { |
359 | struct task_struct *child; | 359 | struct task_struct *child; |
360 | struct user * dummy = NULL; | 360 | struct user * dummy = NULL; |
diff --git a/arch/i386/kernel/reboot_fixups.c b/arch/i386/kernel/reboot_fixups.c index 1b183b378c2c..c9b87330aeea 100644 --- a/arch/i386/kernel/reboot_fixups.c +++ b/arch/i386/kernel/reboot_fixups.c | |||
@@ -44,7 +44,7 @@ void mach_reboot_fixups(void) | |||
44 | 44 | ||
45 | for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) { | 45 | for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) { |
46 | cur = &(fixups_table[i]); | 46 | cur = &(fixups_table[i]); |
47 | dev = pci_get_device(cur->vendor, cur->device, 0); | 47 | dev = pci_get_device(cur->vendor, cur->device, NULL); |
48 | if (!dev) | 48 | if (!dev) |
49 | continue; | 49 | continue; |
50 | 50 | ||
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index 9b8c8a19824d..b48ac635f3c1 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c | |||
@@ -389,14 +389,24 @@ static void __init limit_regions(unsigned long long size) | |||
389 | } | 389 | } |
390 | } | 390 | } |
391 | for (i = 0; i < e820.nr_map; i++) { | 391 | for (i = 0; i < e820.nr_map; i++) { |
392 | if (e820.map[i].type == E820_RAM) { | 392 | current_addr = e820.map[i].addr + e820.map[i].size; |
393 | current_addr = e820.map[i].addr + e820.map[i].size; | 393 | if (current_addr < size) |
394 | if (current_addr >= size) { | 394 | continue; |
395 | e820.map[i].size -= current_addr-size; | 395 | |
396 | e820.nr_map = i + 1; | 396 | if (e820.map[i].type != E820_RAM) |
397 | return; | 397 | continue; |
398 | } | 398 | |
399 | if (e820.map[i].addr >= size) { | ||
400 | /* | ||
401 | * This region starts past the end of the | ||
402 | * requested size, skip it completely. | ||
403 | */ | ||
404 | e820.nr_map = i; | ||
405 | } else { | ||
406 | e820.nr_map = i + 1; | ||
407 | e820.map[i].size -= current_addr - size; | ||
399 | } | 408 | } |
409 | return; | ||
400 | } | 410 | } |
401 | } | 411 | } |
402 | 412 | ||
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 1fb26d0e30b6..5a2bbe0c4fff 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
@@ -87,7 +87,11 @@ EXPORT_SYMBOL(cpu_online_map); | |||
87 | cpumask_t cpu_callin_map; | 87 | cpumask_t cpu_callin_map; |
88 | cpumask_t cpu_callout_map; | 88 | cpumask_t cpu_callout_map; |
89 | EXPORT_SYMBOL(cpu_callout_map); | 89 | EXPORT_SYMBOL(cpu_callout_map); |
90 | #ifdef CONFIG_HOTPLUG_CPU | ||
91 | cpumask_t cpu_possible_map = CPU_MASK_ALL; | ||
92 | #else | ||
90 | cpumask_t cpu_possible_map; | 93 | cpumask_t cpu_possible_map; |
94 | #endif | ||
91 | EXPORT_SYMBOL(cpu_possible_map); | 95 | EXPORT_SYMBOL(cpu_possible_map); |
92 | static cpumask_t smp_commenced_mask; | 96 | static cpumask_t smp_commenced_mask; |
93 | 97 | ||
@@ -1074,6 +1078,16 @@ void *xquad_portio; | |||
1074 | EXPORT_SYMBOL(xquad_portio); | 1078 | EXPORT_SYMBOL(xquad_portio); |
1075 | #endif | 1079 | #endif |
1076 | 1080 | ||
1081 | /* | ||
1082 | * Fall back to non SMP mode after errors. | ||
1083 | * | ||
1084 | */ | ||
1085 | static __init void disable_smp(void) | ||
1086 | { | ||
1087 | cpu_set(0, cpu_sibling_map[0]); | ||
1088 | cpu_set(0, cpu_core_map[0]); | ||
1089 | } | ||
1090 | |||
1077 | static void __init smp_boot_cpus(unsigned int max_cpus) | 1091 | static void __init smp_boot_cpus(unsigned int max_cpus) |
1078 | { | 1092 | { |
1079 | int apicid, cpu, bit, kicked; | 1093 | int apicid, cpu, bit, kicked; |
@@ -1086,7 +1100,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) | |||
1086 | printk("CPU%d: ", 0); | 1100 | printk("CPU%d: ", 0); |
1087 | print_cpu_info(&cpu_data[0]); | 1101 | print_cpu_info(&cpu_data[0]); |
1088 | 1102 | ||
1089 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | ||
1090 | boot_cpu_logical_apicid = logical_smp_processor_id(); | 1103 | boot_cpu_logical_apicid = logical_smp_processor_id(); |
1091 | x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; | 1104 | x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; |
1092 | 1105 | ||
@@ -1098,68 +1111,27 @@ static void __init smp_boot_cpus(unsigned int max_cpus) | |||
1098 | cpus_clear(cpu_core_map[0]); | 1111 | cpus_clear(cpu_core_map[0]); |
1099 | cpu_set(0, cpu_core_map[0]); | 1112 | cpu_set(0, cpu_core_map[0]); |
1100 | 1113 | ||
1114 | map_cpu_to_logical_apicid(); | ||
1115 | |||
1101 | /* | 1116 | /* |
1102 | * If we couldn't find an SMP configuration at boot time, | 1117 | * If we couldn't find an SMP configuration at boot time, |
1103 | * get out of here now! | 1118 | * get out of here now! |
1104 | */ | 1119 | */ |
1105 | if (!smp_found_config && !acpi_lapic) { | 1120 | if (!smp_found_config && !acpi_lapic) { |
1106 | printk(KERN_NOTICE "SMP motherboard not detected.\n"); | 1121 | printk(KERN_NOTICE "SMP motherboard not detected.\n"); |
1107 | smpboot_clear_io_apic_irqs(); | 1122 | disable_smp(); |
1108 | phys_cpu_present_map = physid_mask_of_physid(0); | ||
1109 | if (APIC_init_uniprocessor()) | ||
1110 | printk(KERN_NOTICE "Local APIC not detected." | ||
1111 | " Using dummy APIC emulation.\n"); | ||
1112 | map_cpu_to_logical_apicid(); | ||
1113 | cpu_set(0, cpu_sibling_map[0]); | ||
1114 | cpu_set(0, cpu_core_map[0]); | ||
1115 | return; | 1123 | return; |
1116 | } | 1124 | } |
1117 | 1125 | ||
1118 | /* | 1126 | /* |
1119 | * Should not be necessary because the MP table should list the boot | ||
1120 | * CPU too, but we do it for the sake of robustness anyway. | ||
1121 | * Makes no sense to do this check in clustered apic mode, so skip it | ||
1122 | */ | ||
1123 | if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { | ||
1124 | printk("weird, boot CPU (#%d) not listed by the BIOS.\n", | ||
1125 | boot_cpu_physical_apicid); | ||
1126 | physid_set(hard_smp_processor_id(), phys_cpu_present_map); | ||
1127 | } | ||
1128 | |||
1129 | /* | ||
1130 | * If we couldn't find a local APIC, then get out of here now! | ||
1131 | */ | ||
1132 | if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) { | ||
1133 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | ||
1134 | boot_cpu_physical_apicid); | ||
1135 | printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); | ||
1136 | smpboot_clear_io_apic_irqs(); | ||
1137 | phys_cpu_present_map = physid_mask_of_physid(0); | ||
1138 | cpu_set(0, cpu_sibling_map[0]); | ||
1139 | cpu_set(0, cpu_core_map[0]); | ||
1140 | return; | ||
1141 | } | ||
1142 | |||
1143 | verify_local_APIC(); | ||
1144 | |||
1145 | /* | ||
1146 | * If SMP should be disabled, then really disable it! | 1127 | * If SMP should be disabled, then really disable it! |
1147 | */ | 1128 | */ |
1148 | if (!max_cpus) { | 1129 | if (!max_cpus || (enable_local_apic < 0)) { |
1149 | smp_found_config = 0; | 1130 | printk(KERN_INFO "SMP mode deactivated.\n"); |
1150 | printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); | 1131 | disable_smp(); |
1151 | smpboot_clear_io_apic_irqs(); | ||
1152 | phys_cpu_present_map = physid_mask_of_physid(0); | ||
1153 | cpu_set(0, cpu_sibling_map[0]); | ||
1154 | cpu_set(0, cpu_core_map[0]); | ||
1155 | return; | 1132 | return; |
1156 | } | 1133 | } |
1157 | 1134 | ||
1158 | connect_bsp_APIC(); | ||
1159 | setup_local_APIC(); | ||
1160 | map_cpu_to_logical_apicid(); | ||
1161 | |||
1162 | |||
1163 | setup_portio_remap(); | 1135 | setup_portio_remap(); |
1164 | 1136 | ||
1165 | /* | 1137 | /* |
@@ -1240,10 +1212,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) | |||
1240 | cpu_set(0, cpu_sibling_map[0]); | 1212 | cpu_set(0, cpu_sibling_map[0]); |
1241 | cpu_set(0, cpu_core_map[0]); | 1213 | cpu_set(0, cpu_core_map[0]); |
1242 | 1214 | ||
1243 | smpboot_setup_io_apic(); | ||
1244 | |||
1245 | setup_boot_APIC_clock(); | ||
1246 | |||
1247 | /* | 1215 | /* |
1248 | * Synchronize the TSC with the AP | 1216 | * Synchronize the TSC with the AP |
1249 | */ | 1217 | */ |
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c index 516bf5653b02..8de658db8146 100644 --- a/arch/i386/kernel/srat.c +++ b/arch/i386/kernel/srat.c | |||
@@ -327,7 +327,12 @@ int __init get_memcfg_from_srat(void) | |||
327 | int tables = 0; | 327 | int tables = 0; |
328 | int i = 0; | 328 | int i = 0; |
329 | 329 | ||
330 | acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING, rsdp_address); | 330 | if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING, |
331 | rsdp_address))) { | ||
332 | printk("%s: System description tables not found\n", | ||
333 | __FUNCTION__); | ||
334 | goto out_err; | ||
335 | } | ||
331 | 336 | ||
332 | if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) { | 337 | if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) { |
333 | printk("%s: assigning address to rsdp\n", __FUNCTION__); | 338 | printk("%s: assigning address to rsdp\n", __FUNCTION__); |
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c index 2883a4d4f01f..07471bba2dc6 100644 --- a/arch/i386/kernel/time.c +++ b/arch/i386/kernel/time.c | |||
@@ -74,10 +74,6 @@ int pit_latch_buggy; /* extern */ | |||
74 | 74 | ||
75 | #include "do_timer.h" | 75 | #include "do_timer.h" |
76 | 76 | ||
77 | u64 jiffies_64 = INITIAL_JIFFIES; | ||
78 | |||
79 | EXPORT_SYMBOL(jiffies_64); | ||
80 | |||
81 | unsigned int cpu_khz; /* Detected as we calibrate the TSC */ | 77 | unsigned int cpu_khz; /* Detected as we calibrate the TSC */ |
82 | EXPORT_SYMBOL(cpu_khz); | 78 | EXPORT_SYMBOL(cpu_khz); |
83 | 79 | ||
@@ -444,8 +440,8 @@ static int time_init_device(void) | |||
444 | 440 | ||
445 | device_initcall(time_init_device); | 441 | device_initcall(time_init_device); |
446 | 442 | ||
447 | #ifdef CONFIG_HPET_TIMER | ||
448 | extern void (*late_time_init)(void); | 443 | extern void (*late_time_init)(void); |
444 | #ifdef CONFIG_HPET_TIMER | ||
449 | /* Duplicate of time_init() below, with hpet_enable part added */ | 445 | /* Duplicate of time_init() below, with hpet_enable part added */ |
450 | static void __init hpet_time_init(void) | 446 | static void __init hpet_time_init(void) |
451 | { | 447 | { |
@@ -462,6 +458,11 @@ static void __init hpet_time_init(void) | |||
462 | printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name); | 458 | printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name); |
463 | 459 | ||
464 | time_init_hook(); | 460 | time_init_hook(); |
461 | |||
462 | #ifdef CONFIG_X86_LOCAL_APIC | ||
463 | if (enable_local_apic >= 0) | ||
464 | APIC_late_time_init(); | ||
465 | #endif | ||
465 | } | 466 | } |
466 | #endif | 467 | #endif |
467 | 468 | ||
@@ -486,4 +487,9 @@ void __init time_init(void) | |||
486 | printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name); | 487 | printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name); |
487 | 488 | ||
488 | time_init_hook(); | 489 | time_init_hook(); |
490 | |||
491 | #ifdef CONFIG_X86_LOCAL_APIC | ||
492 | if (enable_local_apic >= 0) | ||
493 | late_time_init = APIC_late_time_init; | ||
494 | #endif | ||
489 | } | 495 | } |
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c index 658c0629ba6a..9caeaa315cd7 100644 --- a/arch/i386/kernel/time_hpet.c +++ b/arch/i386/kernel/time_hpet.c | |||
@@ -275,6 +275,7 @@ static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ; | |||
275 | static unsigned long PIE_count; | 275 | static unsigned long PIE_count; |
276 | 276 | ||
277 | static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */ | 277 | static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */ |
278 | static unsigned int hpet_t1_cmp; /* cached comparator register */ | ||
278 | 279 | ||
279 | /* | 280 | /* |
280 | * Timer 1 for RTC, we do not use periodic interrupt feature, | 281 | * Timer 1 for RTC, we do not use periodic interrupt feature, |
@@ -306,10 +307,12 @@ int hpet_rtc_timer_init(void) | |||
306 | cnt = hpet_readl(HPET_COUNTER); | 307 | cnt = hpet_readl(HPET_COUNTER); |
307 | cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); | 308 | cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); |
308 | hpet_writel(cnt, HPET_T1_CMP); | 309 | hpet_writel(cnt, HPET_T1_CMP); |
310 | hpet_t1_cmp = cnt; | ||
309 | local_irq_restore(flags); | 311 | local_irq_restore(flags); |
310 | 312 | ||
311 | cfg = hpet_readl(HPET_T1_CFG); | 313 | cfg = hpet_readl(HPET_T1_CFG); |
312 | cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT; | 314 | cfg &= ~HPET_TN_PERIODIC; |
315 | cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; | ||
313 | hpet_writel(cfg, HPET_T1_CFG); | 316 | hpet_writel(cfg, HPET_T1_CFG); |
314 | 317 | ||
315 | return 1; | 318 | return 1; |
@@ -319,8 +322,12 @@ static void hpet_rtc_timer_reinit(void) | |||
319 | { | 322 | { |
320 | unsigned int cfg, cnt; | 323 | unsigned int cfg, cnt; |
321 | 324 | ||
322 | if (!(PIE_on | AIE_on | UIE_on)) | 325 | if (unlikely(!(PIE_on | AIE_on | UIE_on))) { |
326 | cfg = hpet_readl(HPET_T1_CFG); | ||
327 | cfg &= ~HPET_TN_ENABLE; | ||
328 | hpet_writel(cfg, HPET_T1_CFG); | ||
323 | return; | 329 | return; |
330 | } | ||
324 | 331 | ||
325 | if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) | 332 | if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) |
326 | hpet_rtc_int_freq = PIE_freq; | 333 | hpet_rtc_int_freq = PIE_freq; |
@@ -328,15 +335,10 @@ static void hpet_rtc_timer_reinit(void) | |||
328 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; | 335 | hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; |
329 | 336 | ||
330 | /* It is more accurate to use the comparator value than current count.*/ | 337 | /* It is more accurate to use the comparator value than current count.*/ |
331 | cnt = hpet_readl(HPET_T1_CMP); | 338 | cnt = hpet_t1_cmp; |
332 | cnt += hpet_tick*HZ/hpet_rtc_int_freq; | 339 | cnt += hpet_tick*HZ/hpet_rtc_int_freq; |
333 | hpet_writel(cnt, HPET_T1_CMP); | 340 | hpet_writel(cnt, HPET_T1_CMP); |
334 | 341 | hpet_t1_cmp = cnt; | |
335 | cfg = hpet_readl(HPET_T1_CFG); | ||
336 | cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT; | ||
337 | hpet_writel(cfg, HPET_T1_CFG); | ||
338 | |||
339 | return; | ||
340 | } | 342 | } |
341 | 343 | ||
342 | /* | 344 | /* |
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c index d973a8b681fd..be242723c339 100644 --- a/arch/i386/kernel/timers/timer_hpet.c +++ b/arch/i386/kernel/timers/timer_hpet.c | |||
@@ -30,23 +30,28 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED; | |||
30 | * basic equation: | 30 | * basic equation: |
31 | * ns = cycles / (freq / ns_per_sec) | 31 | * ns = cycles / (freq / ns_per_sec) |
32 | * ns = cycles * (ns_per_sec / freq) | 32 | * ns = cycles * (ns_per_sec / freq) |
33 | * ns = cycles * (10^9 / (cpu_mhz * 10^6)) | 33 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) |
34 | * ns = cycles * (10^3 / cpu_mhz) | 34 | * ns = cycles * (10^6 / cpu_khz) |
35 | * | 35 | * |
36 | * Then we use scaling math (suggested by george@mvista.com) to get: | 36 | * Then we use scaling math (suggested by george@mvista.com) to get: |
37 | * ns = cycles * (10^3 * SC / cpu_mhz) / SC | 37 | * ns = cycles * (10^6 * SC / cpu_khz) / SC |
38 | * ns = cycles * cyc2ns_scale / SC | 38 | * ns = cycles * cyc2ns_scale / SC |
39 | * | 39 | * |
40 | * And since SC is a constant power of two, we can convert the div | 40 | * And since SC is a constant power of two, we can convert the div |
41 | * into a shift. | 41 | * into a shift. |
42 | * | ||
43 | * We can use khz divisor instead of mhz to keep a better percision, since | ||
44 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | ||
45 | * (mathieu.desnoyers@polymtl.ca) | ||
46 | * | ||
42 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | 47 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" |
43 | */ | 48 | */ |
44 | static unsigned long cyc2ns_scale; | 49 | static unsigned long cyc2ns_scale; |
45 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | 50 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ |
46 | 51 | ||
47 | static inline void set_cyc2ns_scale(unsigned long cpu_mhz) | 52 | static inline void set_cyc2ns_scale(unsigned long cpu_khz) |
48 | { | 53 | { |
49 | cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; | 54 | cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz; |
50 | } | 55 | } |
51 | 56 | ||
52 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | 57 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) |
@@ -163,7 +168,7 @@ static int __init init_hpet(char* override) | |||
163 | printk("Detected %u.%03u MHz processor.\n", | 168 | printk("Detected %u.%03u MHz processor.\n", |
164 | cpu_khz / 1000, cpu_khz % 1000); | 169 | cpu_khz / 1000, cpu_khz % 1000); |
165 | } | 170 | } |
166 | set_cyc2ns_scale(cpu_khz/1000); | 171 | set_cyc2ns_scale(cpu_khz); |
167 | } | 172 | } |
168 | /* set this only when cpu_has_tsc */ | 173 | /* set this only when cpu_has_tsc */ |
169 | timer_hpet.read_timer = read_timer_tsc; | 174 | timer_hpet.read_timer = read_timer_tsc; |
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c index 6dd470cc9f72..d395e3b42485 100644 --- a/arch/i386/kernel/timers/timer_tsc.c +++ b/arch/i386/kernel/timers/timer_tsc.c | |||
@@ -49,23 +49,28 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED; | |||
49 | * basic equation: | 49 | * basic equation: |
50 | * ns = cycles / (freq / ns_per_sec) | 50 | * ns = cycles / (freq / ns_per_sec) |
51 | * ns = cycles * (ns_per_sec / freq) | 51 | * ns = cycles * (ns_per_sec / freq) |
52 | * ns = cycles * (10^9 / (cpu_mhz * 10^6)) | 52 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) |
53 | * ns = cycles * (10^3 / cpu_mhz) | 53 | * ns = cycles * (10^6 / cpu_khz) |
54 | * | 54 | * |
55 | * Then we use scaling math (suggested by george@mvista.com) to get: | 55 | * Then we use scaling math (suggested by george@mvista.com) to get: |
56 | * ns = cycles * (10^3 * SC / cpu_mhz) / SC | 56 | * ns = cycles * (10^6 * SC / cpu_khz) / SC |
57 | * ns = cycles * cyc2ns_scale / SC | 57 | * ns = cycles * cyc2ns_scale / SC |
58 | * | 58 | * |
59 | * And since SC is a constant power of two, we can convert the div | 59 | * And since SC is a constant power of two, we can convert the div |
60 | * into a shift. | 60 | * into a shift. |
61 | * | ||
62 | * We can use khz divisor instead of mhz to keep a better percision, since | ||
63 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | ||
64 | * (mathieu.desnoyers@polymtl.ca) | ||
65 | * | ||
61 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | 66 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" |
62 | */ | 67 | */ |
63 | static unsigned long cyc2ns_scale; | 68 | static unsigned long cyc2ns_scale; |
64 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | 69 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ |
65 | 70 | ||
66 | static inline void set_cyc2ns_scale(unsigned long cpu_mhz) | 71 | static inline void set_cyc2ns_scale(unsigned long cpu_khz) |
67 | { | 72 | { |
68 | cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; | 73 | cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz; |
69 | } | 74 | } |
70 | 75 | ||
71 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | 76 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) |
@@ -286,7 +291,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | |||
286 | if (use_tsc) { | 291 | if (use_tsc) { |
287 | if (!(freq->flags & CPUFREQ_CONST_LOOPS)) { | 292 | if (!(freq->flags & CPUFREQ_CONST_LOOPS)) { |
288 | fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq); | 293 | fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq); |
289 | set_cyc2ns_scale(cpu_khz/1000); | 294 | set_cyc2ns_scale(cpu_khz); |
290 | } | 295 | } |
291 | } | 296 | } |
292 | #endif | 297 | #endif |
@@ -536,7 +541,7 @@ static int __init init_tsc(char* override) | |||
536 | printk("Detected %u.%03u MHz processor.\n", | 541 | printk("Detected %u.%03u MHz processor.\n", |
537 | cpu_khz / 1000, cpu_khz % 1000); | 542 | cpu_khz / 1000, cpu_khz % 1000); |
538 | } | 543 | } |
539 | set_cyc2ns_scale(cpu_khz/1000); | 544 | set_cyc2ns_scale(cpu_khz); |
540 | return 0; | 545 | return 0; |
541 | } | 546 | } |
542 | } | 547 | } |
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 19e90bdd84ea..c34d1bfc5161 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c | |||
@@ -488,6 +488,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs, | |||
488 | tss->io_bitmap_max - thread->io_bitmap_max); | 488 | tss->io_bitmap_max - thread->io_bitmap_max); |
489 | tss->io_bitmap_max = thread->io_bitmap_max; | 489 | tss->io_bitmap_max = thread->io_bitmap_max; |
490 | tss->io_bitmap_base = IO_BITMAP_OFFSET; | 490 | tss->io_bitmap_base = IO_BITMAP_OFFSET; |
491 | tss->io_bitmap_owner = thread; | ||
491 | put_cpu(); | 492 | put_cpu(); |
492 | return; | 493 | return; |
493 | } | 494 | } |
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c index 16b485009622..fc1993564f98 100644 --- a/arch/i386/kernel/vm86.c +++ b/arch/i386/kernel/vm86.c | |||
@@ -134,17 +134,16 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs) | |||
134 | return ret; | 134 | return ret; |
135 | } | 135 | } |
136 | 136 | ||
137 | static void mark_screen_rdonly(struct task_struct * tsk) | 137 | static void mark_screen_rdonly(struct mm_struct *mm) |
138 | { | 138 | { |
139 | pgd_t *pgd; | 139 | pgd_t *pgd; |
140 | pud_t *pud; | 140 | pud_t *pud; |
141 | pmd_t *pmd; | 141 | pmd_t *pmd; |
142 | pte_t *pte, *mapped; | 142 | pte_t *pte; |
143 | spinlock_t *ptl; | ||
143 | int i; | 144 | int i; |
144 | 145 | ||
145 | preempt_disable(); | 146 | pgd = pgd_offset(mm, 0xA0000); |
146 | spin_lock(&tsk->mm->page_table_lock); | ||
147 | pgd = pgd_offset(tsk->mm, 0xA0000); | ||
148 | if (pgd_none_or_clear_bad(pgd)) | 147 | if (pgd_none_or_clear_bad(pgd)) |
149 | goto out; | 148 | goto out; |
150 | pud = pud_offset(pgd, 0xA0000); | 149 | pud = pud_offset(pgd, 0xA0000); |
@@ -153,16 +152,14 @@ static void mark_screen_rdonly(struct task_struct * tsk) | |||
153 | pmd = pmd_offset(pud, 0xA0000); | 152 | pmd = pmd_offset(pud, 0xA0000); |
154 | if (pmd_none_or_clear_bad(pmd)) | 153 | if (pmd_none_or_clear_bad(pmd)) |
155 | goto out; | 154 | goto out; |
156 | pte = mapped = pte_offset_map(pmd, 0xA0000); | 155 | pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl); |
157 | for (i = 0; i < 32; i++) { | 156 | for (i = 0; i < 32; i++) { |
158 | if (pte_present(*pte)) | 157 | if (pte_present(*pte)) |
159 | set_pte(pte, pte_wrprotect(*pte)); | 158 | set_pte(pte, pte_wrprotect(*pte)); |
160 | pte++; | 159 | pte++; |
161 | } | 160 | } |
162 | pte_unmap(mapped); | 161 | pte_unmap_unlock(pte, ptl); |
163 | out: | 162 | out: |
164 | spin_unlock(&tsk->mm->page_table_lock); | ||
165 | preempt_enable(); | ||
166 | flush_tlb(); | 163 | flush_tlb(); |
167 | } | 164 | } |
168 | 165 | ||
@@ -306,7 +303,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk | |||
306 | 303 | ||
307 | tsk->thread.screen_bitmap = info->screen_bitmap; | 304 | tsk->thread.screen_bitmap = info->screen_bitmap; |
308 | if (info->flags & VM86_SCREEN_BITMAP) | 305 | if (info->flags & VM86_SCREEN_BITMAP) |
309 | mark_screen_rdonly(tsk); | 306 | mark_screen_rdonly(tsk->mm); |
310 | __asm__ __volatile__( | 307 | __asm__ __volatile__( |
311 | "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t" | 308 | "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t" |
312 | "movl %0,%%esp\n\t" | 309 | "movl %0,%%esp\n\t" |