author		Linus Torvalds <torvalds@linux-foundation.org>	2008-06-04 12:15:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-06-04 12:15:51 -0400
commit		4ded383569d6316d68d2aed298f8eb8d7bca37af
tree		87849300140f7a1c4d4efc78760156826cb28557
parent		e97dcb0eadbb821eccd549d4987b653cf61e2374
parent		870568b39064cab2dd971fe57969916036982862
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-tip:
x86, fpu: fix CONFIG_PREEMPT=y corruption of application's FPU stack
suspend-vs-iommu: prevent suspend if we could not resume
x86: section mismatch fix
x86: fix Xorg crash with xf86MapVidMem error
x86: fix pointer type warning in arch/x86/mm/init_64.c:early_memtest
x86: fix bad pmd ffff810000207xxx(9090909090909090)
x86: ioremap fix failing nesting check
x86: fix broken math-emu with lazy allocation of fpu area
x86: enable preemption in delay
x86: disable preemption in native_smp_prepare_cpus
x86: fix APIC warning on 32bit v2
 arch/x86/kernel/acpi/boot.c    | 16
 arch/x86/kernel/i387.c         | 44
 arch/x86/kernel/pci-gart_64.c  | 31
 arch/x86/kernel/process_32.c   |  5
 arch/x86/kernel/process_64.c   |  5
 arch/x86/kernel/smpboot.c      |  5
 arch/x86/lib/delay_32.c        | 31
 arch/x86/lib/delay_64.c        | 30
 arch/x86/math-emu/fpu_entry.c  | 13
 arch/x86/mm/init_64.c          |  6
 arch/x86/mm/ioremap.c          |  5
 arch/x86/mm/pat.c              | 51
 include/asm-x86/i387.h         |  2
 13 files changed, 179 insertions(+), 65 deletions(-)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index c49ebcc6c41e..33c5216fd3e1 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -242,12 +242,19 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 
 static void __cpuinit acpi_register_lapic(int id, u8 enabled)
 {
+	unsigned int ver = 0;
+
 	if (!enabled) {
 		++disabled_cpus;
 		return;
 	}
 
-	generic_processor_info(id, 0);
+#ifdef CONFIG_X86_32
+	if (boot_cpu_physical_apicid != -1U)
+		ver = apic_version[boot_cpu_physical_apicid];
+#endif
+
+	generic_processor_info(id, ver);
 }
 
 static int __init
@@ -767,8 +774,13 @@ static void __init acpi_register_lapic_address(unsigned long address)
 	mp_lapic_addr = address;
 
 	set_fixmap_nocache(FIX_APIC_BASE, address);
-	if (boot_cpu_physical_apicid == -1U)
+	if (boot_cpu_physical_apicid == -1U) {
 		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+#ifdef CONFIG_X86_32
+		apic_version[boot_cpu_physical_apicid] =
+			GET_APIC_VERSION(apic_read(APIC_LVR));
+#endif
+	}
 }
 
 static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index e03cc952f233..eb9ddd8efb82 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -56,6 +56,11 @@ void __cpuinit mxcsr_feature_mask_init(void)
 
 void __init init_thread_xstate(void)
 {
+	if (!HAVE_HWFP) {
+		xstate_size = sizeof(struct i387_soft_struct);
+		return;
+	}
+
 	if (cpu_has_fxsr)
 		xstate_size = sizeof(struct i387_fxsave_struct);
 #ifdef CONFIG_X86_32
@@ -94,7 +99,7 @@ void __cpuinit fpu_init(void)
 int init_fpu(struct task_struct *tsk)
 {
 	if (tsk_used_math(tsk)) {
-		if (tsk == current)
+		if (HAVE_HWFP && tsk == current)
 			unlazy_fpu(tsk);
 		return 0;
 	}
@@ -109,6 +114,15 @@ int init_fpu(struct task_struct *tsk)
 		return -ENOMEM;
 	}
 
+#ifdef CONFIG_X86_32
+	if (!HAVE_HWFP) {
+		memset(tsk->thread.xstate, 0, xstate_size);
+		finit();
+		set_stopped_child_used_math(tsk);
+		return 0;
+	}
+#endif
+
 	if (cpu_has_fxsr) {
 		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
 
@@ -330,13 +344,13 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	if (!HAVE_HWFP)
-		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
-
 	ret = init_fpu(target);
 	if (ret)
 		return ret;
 
+	if (!HAVE_HWFP)
+		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
+
 	if (!cpu_has_fxsr) {
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 					   &target->thread.xstate->fsave, 0,
@@ -360,15 +374,15 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	if (!HAVE_HWFP)
-		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
-
 	ret = init_fpu(target);
 	if (ret)
 		return ret;
 
 	set_stopped_child_used_math(target);
 
+	if (!HAVE_HWFP)
+		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
+
 	if (!cpu_has_fxsr) {
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					  &target->thread.xstate->fsave, 0, -1);
@@ -474,18 +488,18 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
 int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
 {
 	int err;
+	struct task_struct *tsk = current;
 
-	if (HAVE_HWFP) {
-		struct task_struct *tsk = current;
-
-		clear_fpu(tsk);
+	if (HAVE_HWFP)
+		clear_fpu(tsk);
 
-		if (!used_math()) {
-			err = init_fpu(tsk);
-			if (err)
-				return err;
-		}
+	if (!used_math()) {
+		err = init_fpu(tsk);
+		if (err)
+			return err;
+	}
 
+	if (HAVE_HWFP) {
 		if (cpu_has_fxsr)
 			err = restore_i387_fxsave(buf);
 		else
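A note on the fpregs_get()/fpregs_set() reordering above: both the hardware path and the soft-FPU path dereference the lazily allocated tsk->thread.xstate, so the !HAVE_HWFP early return has to come after init_fpu(), not before it. A minimal stand-alone sketch of that allocate-before-branch shape — ensure_state() and the struct here are illustrative stand-ins, not the kernel's API:

```c
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define XSTATE_SIZE 512

struct task { void *xstate; };

static int ensure_state(struct task *t)	/* init_fpu() analogue */
{
	if (t->xstate)
		return 0;
	t->xstate = calloc(1, XSTATE_SIZE);
	return t->xstate ? 0 : -ENOMEM;
}

static int fpregs_get_sketch(struct task *t, void *buf, size_t len)
{
	int ret = ensure_state(t);	/* allocate first ... */
	if (ret)
		return ret;		/* (the old code branched to the
					 * soft-FPU path before this point
					 * and could touch a NULL xstate) */
	memcpy(buf, t->xstate, len < XSTATE_SIZE ? len : XSTATE_SIZE);
	return 0;
}
```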
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index c07455d1695f..aa8ec928caa8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -26,6 +26,7 @@
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
+#include <linux/sysdev.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -548,6 +549,28 @@ static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
 	return aper_base;
 }
 
+static int gart_resume(struct sys_device *dev)
+{
+	return 0;
+}
+
+static int gart_suspend(struct sys_device *dev, pm_message_t state)
+{
+	return -EINVAL;
+}
+
+static struct sysdev_class gart_sysdev_class = {
+	.name = "gart",
+	.suspend = gart_suspend,
+	.resume = gart_resume,
+
+};
+
+static struct sys_device device_gart = {
+	.id = 0,
+	.cls = &gart_sysdev_class,
+};
+
 /*
  * Private Northbridge GATT initialization in case we cannot use the
  * AGP driver for some reason.
@@ -558,7 +581,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	unsigned aper_base, new_aper_base;
 	struct pci_dev *dev;
 	void *gatt;
-	int i;
+	int i, error;
 
 	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
 	aper_size = aper_base = info->aper_size = 0;
@@ -606,6 +629,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 		pci_write_config_dword(dev, 0x90, ctl);
 	}
+
+	error = sysdev_class_register(&gart_sysdev_class);
+	if (!error)
+		error = sysdev_register(&device_gart);
+	if (error)
+		panic("Could not register gart_sysdev -- would corrupt data on next suspend");
 	flush_gart();
 
 	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
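The sysdev block above is the whole suspend-vs-iommu guard: the PM core treats a nonzero return from a ->suspend hook as a veto and unwinds the suspend, so until the GART has a real resume path the machine refuses to sleep rather than risk DMA through a stale aperture after resume. A toy model of that veto semantic in plain C — names are illustrative, and the sysdev interface itself was later removed from the kernel:

```c
#include <stdio.h>

struct dev {
	const char *name;
	int (*suspend)(void);	/* nonzero return vetoes the transition */
};

static int gart_suspend(void)
{
	return -1;	/* no resume handler yet: refuse to sleep */
}

static int suspend_all(struct dev *devs, int n)
{
	for (int i = 0; i < n; i++) {
		if (devs[i].suspend && devs[i].suspend() != 0) {
			fprintf(stderr, "suspend aborted by %s\n",
				devs[i].name);
			return -1;	/* PM core resumes the devices
					 * suspended so far */
		}
	}
	return 0;
}

int main(void)
{
	struct dev devs[] = { { "gart", gart_suspend } };
	return suspend_all(devs, 1) ? 1 : 0;
}
```

The registration failure path ends in panic() rather than a warning because silently continuing would reintroduce exactly the corruption the hook exists to prevent.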
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f8476dfbb60d..6d5483356e74 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -649,8 +649,11 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
+	 *
+	 * tsk_used_math() checks prevent calling math_state_restore(),
+	 * which can sleep in the case of !tsk_used_math()
 	 */
-	if (next_p->fpu_counter > 5)
+	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
 
 	/*
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e2319f39988b..ac54ff56df80 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -658,8 +658,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
+	 *
+	 * tsk_used_math() checks prevent calling math_state_restore(),
+	 * which can sleep in the case of !tsk_used_math()
 	 */
-	if (next_p->fpu_counter>5)
+	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
 	return prev_p;
 }
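The identical guard in both __switch_to() variants is the CONFIG_PREEMPT FPU-stack corruption fix from the merge summary: per the new comment, math_state_restore() can sleep when the task has never used the FPU (it has to allocate the xstate area first), and sleeping is not allowed in the context-switch path. A compilable toy model of why the guard makes the sleeping branch unreachable — all names are stand-ins, not kernel code:

```c
#include <stdbool.h>
#include <stdio.h>

struct task { bool used_math; int fpu_counter; };

/* Models math_state_restore(): may "allocate" (and in the kernel,
 * therefore sleep) when the task never used the FPU. */
static void restore_now(struct task *t)
{
	if (!t->used_math) {
		/* init_fpu() analogue: allocation here may sleep --
		 * fatal inside the atomic context-switch path */
		t->used_math = true;
	}
	/* ... load FPU registers ... */
}

static void switch_to_sketch(struct task *next)
{
	/* Restore eagerly only when state already exists, so the
	 * sleeping branch of restore_now() cannot be reached here. */
	if (next->used_math && next->fpu_counter > 5)
		restore_now(next);
}

int main(void)
{
	struct task t = { .used_math = false, .fpu_counter = 9 };
	switch_to_sketch(&t);	/* skipped: no state to restore yet */
	printf("used_math=%d\n", t.used_math);
	return 0;
}
```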
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 38988491c622..56078d61c793 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1190,6 +1190,7 @@ static void __init smp_cpu_index_default(void)
  */
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
+	preempt_disable();
 	nmi_watchdog_default();
 	smp_cpu_index_default();
 	current_cpu_data = boot_cpu_data;
@@ -1206,7 +1207,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");
 		disable_smp();
-		return;
+		goto out;
 	}
 
 	preempt_disable();
@@ -1246,6 +1247,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	printk(KERN_INFO "CPU%d: ", 0);
 	print_cpu_info(&cpu_data(0));
 	setup_boot_clock();
+out:
+	preempt_enable();
 }
 /*
  * Early setup to make printk work.
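The smpboot change pairs the new preempt_disable() at function entry with a single preempt_enable() at the new out: label; the early return in the sanity-check path becomes goto out so no exit leaves the preempt count unbalanced. The control-flow shape as a stand-alone sketch, with stub names rather than the kernel function:

```c
#include <stdio.h>

static int preempt_count;	/* models the per-CPU preempt counter */
static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

static int smp_sanity_check(void) { return -1; }  /* force error path */

static void prepare_cpus_sketch(void)
{
	preempt_disable();
	if (smp_sanity_check() < 0) {
		/* a bare "return" here would leak one preempt count */
		goto out;
	}
	/* ... bring up secondary CPUs ... */
out:
	preempt_enable();
}

int main(void)
{
	prepare_cpus_sketch();
	printf("preempt_count=%d\n", preempt_count);	/* 0: balanced */
	return preempt_count != 0;
}
```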
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
index 4535e6d147ad..d710f2d167bb 100644
--- a/arch/x86/lib/delay_32.c
+++ b/arch/x86/lib/delay_32.c
@@ -44,13 +44,36 @@ static void delay_loop(unsigned long loops)
 static void delay_tsc(unsigned long loops)
 {
 	unsigned long bclock, now;
+	int cpu;
 
-	preempt_disable();		/* TSC's are per-cpu */
+	preempt_disable();
+	cpu = smp_processor_id();
 	rdtscl(bclock);
-	do {
-		rep_nop();
+	for (;;) {
 		rdtscl(now);
-	} while ((now-bclock) < loops);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
+	}
 	preempt_enable();
 }
 
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
index bbc610518516..4c441be92641 100644
--- a/arch/x86/lib/delay_64.c
+++ b/arch/x86/lib/delay_64.c
@@ -31,14 +31,36 @@ int __devinit read_current_timer(unsigned long *timer_value)
 void __delay(unsigned long loops)
 {
 	unsigned bclock, now;
+	int cpu;
 
-	preempt_disable();		/* TSC's are pre-cpu */
+	preempt_disable();
+	cpu = smp_processor_id();
 	rdtscl(bclock);
-	do {
-		rep_nop();
+	for (;;) {
 		rdtscl(now);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
 	}
-	while ((now-bclock) < loops);
 	preempt_enable();
 }
 EXPORT_SYMBOL(__delay);
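Both delay loops now follow the same pattern: briefly re-enable preemption each iteration so an RT task can run, then compensate if the task was migrated, since each CPU's TSC counts independently. A user-space analogue of the rebasing arithmetic — sched_getcpu() stands in for smp_processor_id(), and there is no user-space preempt_disable(), so this sketches only the arithmetic, not the kernel's exact guarantees:

```c
#define _GNU_SOURCE
#include <sched.h>		/* sched_getcpu() */
#include <stdint.h>
#include <x86intrin.h>		/* __rdtsc(); assumes an x86 target */

static void delay_tsc_sketch(uint64_t loops)
{
	int cpu = sched_getcpu();
	uint64_t bclock = __rdtsc();

	for (;;) {
		uint64_t now = __rdtsc();
		if (now - bclock >= loops)
			break;
		if (sched_getcpu() != cpu) {
			/* Migrated: credit the cycles already waited on
			 * the old CPU and restart the measurement here.
			 * The wait can get longer, never shorter. */
			loops -= now - bclock;
			cpu = sched_getcpu();
			bclock = __rdtsc();
		}
	}
}
```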
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 6e38d877ea77..c7b06feb139b 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -30,6 +30,7 @@
 #include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/user.h>
+#include <asm/i387.h>
 
 #include "fpu_system.h"
 #include "fpu_emu.h"
@@ -146,6 +147,13 @@ asmlinkage void math_emulate(long arg)
 	unsigned long code_limit = 0;	/* Initialized to stop compiler warnings */
 	struct desc_struct code_descriptor;
 
+	if (!used_math()) {
+		if (init_fpu(current)) {
+			do_group_exit(SIGKILL);
+			return;
+		}
+	}
+
 #ifdef RE_ENTRANT_CHECKING
 	if (emulating) {
 		printk("ERROR: wm-FPU-emu is not RE-ENTRANT!\n");
@@ -153,11 +161,6 @@ asmlinkage void math_emulate(long arg)
 	RE_ENTRANT_CHECK_ON;
 #endif /* RE_ENTRANT_CHECKING */
 
-	if (!used_math()) {
-		finit();
-		set_used_math();
-	}
-
 	SETUP_DATA_AREA(arg);
 
 	FPU_ORIG_EIP = FPU_EIP;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 32ba13b0f818..156e6d7b0e32 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -206,7 +206,7 @@ void __init cleanup_highmap(void)
 	pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
 	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
-		if (!pmd_present(*pmd))
+		if (pmd_none(*pmd))
 			continue;
 		if (vaddr < (unsigned long) _text || vaddr > end)
 			set_pmd(pmd, __pmd(0));
@@ -506,7 +506,7 @@ early_param("memtest", parse_memtest);
 
 static void __init early_memtest(unsigned long start, unsigned long end)
 {
-	unsigned long t_start, t_size;
+	u64 t_start, t_size;
 	unsigned pattern;
 
 	if (!memtest_pattern)
@@ -525,7 +525,7 @@ static void __init early_memtest(unsigned long start, unsigned long end)
 		if (t_start + t_size > end)
 			t_size = end - t_start;
 
-		printk(KERN_CONT "\n %016lx - %016lx pattern %d",
+		printk(KERN_CONT "\n %016llx - %016llx pattern %d",
 		       t_start, t_start + t_size, pattern);
 
 		memtest(t_start, t_size, pattern);
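The early_memtest() hunk is two coupled fixes: t_start/t_size become u64 to match the types the e820 helpers hand back (the "pointer type warning" named in the merge summary), and the printk format becomes %llx to match the new type. On x86, u64 is unsigned long long, so %lx no longer pairs with it; in miniature, with printf standing in for printk:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long t_start = 0x207000ULL;	/* u64 stand-in */

	printf("%016llx\n", t_start);	/* correct: %llx matches u64 */
	/* printf("%016lx\n", t_start); -- mismatched: warns, and can
	 * misprint where unsigned long is 32-bit */
	return 0;
}
```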
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 71bb3159031a..2b2bb3f9b683 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -593,10 +593,11 @@ void __init early_iounmap(void *addr, unsigned long size)
 	unsigned long offset;
 	unsigned int nrpages;
 	enum fixed_addresses idx;
-	unsigned int nesting;
+	int nesting;
 
 	nesting = --early_ioremap_nested;
-	WARN_ON(nesting < 0);
+	if (WARN_ON(nesting < 0))
+		return;
 
 	if (early_ioremap_debug) {
 		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
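Two things change in early_iounmap(): nesting becomes a signed int so the underflow test can fire at all (with the old unsigned declaration, nesting < 0 was always false), and WARN_ON() is used as an expression so the function warns and bails out instead of unmapping with a corrupt nesting count. WARN_ON() evaluating to its condition's truth value is what enables the idiom; a minimal user-space stand-in:

```c
#include <stdio.h>

/* Kernel-style WARN_ON(): prints a diagnostic and evaluates to the
 * truth value of its condition (GCC statement expression, as in the
 * kernel's own definition). */
#define WARN_ON(cond) ({						\
	int __ret = !!(cond);						\
	if (__ret)							\
		fprintf(stderr, "WARNING at %s:%d\n",			\
			__FILE__, __LINE__);				\
	__ret;								\
})

static int early_ioremap_nested;

static void early_iounmap_sketch(void)
{
	int nesting = --early_ioremap_nested;	/* signed: < 0 detectable */
	if (WARN_ON(nesting < 0))
		return;		/* underflow: warn and refuse to unmap */
	/* ... tear down the fixmap slots ... */
}

int main(void)
{
	early_iounmap_sketch();	/* counter was 0 -> takes the bail-out */
	return 0;
}
```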
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index de3a99812450..06b7a1c90fb8 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -34,7 +34,7 @@ void __cpuinit pat_disable(char *reason)
 	printk(KERN_INFO "%s\n", reason);
 }
 
-static int nopat(char *str)
+static int __init nopat(char *str)
 {
 	pat_disable("PAT support disabled.");
 	return 0;
@@ -151,32 +151,33 @@ static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
 	unsigned long pat_type;
 	u8 mtrr_type;
 
-	mtrr_type = mtrr_type_lookup(start, end);
-	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
-		*ret_prot = prot;
-		return 0;
-	}
-	if (mtrr_type == 0xFE) {		/* MTRR match error */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
-	    mtrr_type != MTRR_TYPE_WRBACK &&
-	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-
 	pat_type = prot & _PAGE_CACHE_MASK;
 	prot &= (~_PAGE_CACHE_MASK);
 
-	/* Currently doing intersection by hand. Optimize it later. */
+	/*
+	 * We return the PAT request directly for types where PAT takes
+	 * precedence with respect to MTRR and for UC_MINUS.
+	 * Consistency checks with other PAT requests is done later
+	 * while going through memtype list.
+	 */
 	if (pat_type == _PAGE_CACHE_WC) {
 		*ret_prot = prot | _PAGE_CACHE_WC;
+		return 0;
 	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
 		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
-	} else if (pat_type == _PAGE_CACHE_UC ||
-		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
+		return 0;
+	} else if (pat_type == _PAGE_CACHE_UC) {
+		*ret_prot = prot | _PAGE_CACHE_UC;
+		return 0;
+	}
+
+	/*
+	 * Look for MTRR hint to get the effective type in case where PAT
+	 * request is for WB.
+	 */
+	mtrr_type = mtrr_type_lookup(start, end);
+
+	if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
 		*ret_prot = prot | _PAGE_CACHE_UC;
 	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
 		*ret_prot = prot | _PAGE_CACHE_WC;
@@ -233,14 +234,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	if (req_type == -1) {
 		/*
-		 * Special case where caller wants to inherit from mtrr or
-		 * existing pat mapping, defaulting to UC_MINUS in case of
-		 * no match.
+		 * Call mtrr_lookup to get the type hint. This is an
+		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
+		 * tools and ACPI tools). Use WB request for WB memory and use
+		 * UC_MINUS otherwise.
 		 */
 		u8 mtrr_type = mtrr_type_lookup(start, end);
-		if (mtrr_type == 0xFE) {	/* MTRR match error */
-			err = -1;
-		}
 
 		if (mtrr_type == MTRR_TYPE_WRBACK) {
 			req_type = _PAGE_CACHE_WB;
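The rewritten pat_x_mtrr_type() now reads as a two-stage decision: explicit PAT requests (WC, UC_MINUS, UC) are returned directly, and only after that is mtrr_type_lookup() consulted, effectively for WB requests; the dropped 0xFF/0xFE special cases had made every caller pay for MTRR error handling up front. A condensed, hypothetical rendering of that order — the function's WB/default tail lies outside the hunk shown, so its behavior is assumed here:

```c
enum ctype { CT_WB, CT_WC, CT_UC_MINUS, CT_UC };

/* Hypothetical condensation of pat_x_mtrr_type()'s decision order. */
static enum ctype effective_type(enum ctype pat_req, enum ctype mtrr_hint)
{
	if (pat_req != CT_WB)		/* PAT wins for WC, UC_MINUS, UC */
		return pat_req;

	switch (mtrr_hint) {		/* WB requests defer to the MTRR hint */
	case CT_UC:
		return CT_UC;
	case CT_WC:
		return CT_WC;
	default:
		return CT_WB;		/* assumed: tail not shown above */
	}
}
```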
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 6b722d315936..37672f79dcc8 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -193,6 +193,8 @@ static inline int restore_i387(struct _fpstate __user *buf)
 
 #else /* CONFIG_X86_32 */
 
+extern void finit(void);
+
 static inline void tolerant_fwait(void)
 {
 	asm volatile("fnclex ; fwait");