author		Ingo Molnar <mingo@elte.hu>	2008-06-12 05:27:22 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-12 05:27:22 -0400
commit		bb6dfb32f90094fea647e1f27d994a8b6ddd2766 (patch)
tree		e63681727543bcc3251bbc82b81a9cc822cc3f5e /arch/x86
parent		4f384f8bcdb5d618a0a68fb84c809e602c798b8f (diff)
parent		5e70b7f3c24468bb1635b295945edb48ecd9656a (diff)
Merge branch 'linus' into x86/gart
Diffstat (limited to 'arch/x86')
35 files changed, 293 insertions, 138 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fe361ae7ef2f..52e18e6d2ba0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,17 +26,10 @@ config X86
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 
-config DEFCONFIG_LIST
+config ARCH_DEFCONFIG
 	string
-	depends on X86_32
-	option defconfig_list
-	default "arch/x86/configs/i386_defconfig"
-
-config DEFCONFIG_LIST
-	string
-	depends on X86_64
-	option defconfig_list
-	default "arch/x86/configs/x86_64_defconfig"
+	default "arch/x86/configs/i386_defconfig" if X86_32
+	default "arch/x86/configs/x86_64_defconfig" if X86_64
 
 
 config GENERIC_LOCKBREAK
@@ -1515,13 +1508,13 @@ config PCI_GOMMCONFIG
 config PCI_GODIRECT
 	bool "Direct"
 
-config PCI_GOANY
-	bool "Any"
-
 config PCI_GOOLPC
 	bool "OLPC"
 	depends on OLPC
 
+config PCI_GOANY
+	bool "Any"
+
 endchoice
 
 config PCI_BIOS
@@ -1538,9 +1531,8 @@ config PCI_MMCONFIG
 	depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
 
 config PCI_OLPC
-	bool
-	depends on PCI && PCI_GOOLPC
-	default y
+	def_bool y
+	depends on PCI && OLPC && (PCI_GOOLPC || PCI_GOANY)
 
 config PCI_DOMAINS
 	def_bool y
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index ac1e31ba4795..18363374d51a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -6,15 +6,19 @@ config TRACE_IRQFLAGS_SUPPORT
 source "lib/Kconfig.debug"
 
 config NONPROMISC_DEVMEM
-	bool "Disable promiscuous /dev/mem"
+	bool "Filter access to /dev/mem"
 	help
-	  The /dev/mem file by default only allows userspace access to PCI
-	  space and the BIOS code and data regions. This is sufficient for
-	  dosemu and X and all common users of /dev/mem. With this config
-	  option, you allow userspace access to all of memory, including
-	  kernel and userspace memory. Accidental access to this is
-	  obviously disasterous, but specific access can be used by people
-	  debugging the kernel.
+	  If this option is left off, you allow userspace access to all
+	  of memory, including kernel and userspace memory. Accidental
+	  access to this is obviously disastrous, but specific access can
+	  be used by people debugging the kernel.
+
+	  If this option is switched on, the /dev/mem file only allows
+	  userspace access to PCI space and the BIOS code and data regions.
+	  This is sufficient for dosemu and X and all common users of
+	  /dev/mem.
+
+	  If in doubt, say Y.
 
 config EARLY_PRINTK
 	bool "Early printk" if EMBEDDED
diff --git a/arch/x86/boot/printf.c b/arch/x86/boot/printf.c
index c1d00c0274c4..50e47cdbdddd 100644
--- a/arch/x86/boot/printf.c
+++ b/arch/x86/boot/printf.c
@@ -56,7 +56,7 @@ static char *number(char *str, long num, int base, int size, int precision,
 	if (type & LEFT)
 		type &= ~ZEROPAD;
 	if (base < 2 || base > 36)
-		return 0;
+		return NULL;
 	c = (type & ZEROPAD) ? '0' : ' ';
 	sign = 0;
 	if (type & SIGN) {
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index c49ebcc6c41e..33c5216fd3e1 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -242,12 +242,19 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 
 static void __cpuinit acpi_register_lapic(int id, u8 enabled)
 {
+	unsigned int ver = 0;
+
 	if (!enabled) {
 		++disabled_cpus;
 		return;
 	}
 
-	generic_processor_info(id, 0);
+#ifdef CONFIG_X86_32
+	if (boot_cpu_physical_apicid != -1U)
+		ver = apic_version[boot_cpu_physical_apicid];
+#endif
+
+	generic_processor_info(id, ver);
 }
 
 static int __init
@@ -767,8 +774,13 @@ static void __init acpi_register_lapic_address(unsigned long address)
 	mp_lapic_addr = address;
 
 	set_fixmap_nocache(FIX_APIC_BASE, address);
-	if (boot_cpu_physical_apicid == -1U)
+	if (boot_cpu_physical_apicid == -1U) {
 		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+#ifdef CONFIG_X86_32
+		apic_version[boot_cpu_physical_apicid] =
+			GET_APIC_VERSION(apic_read(APIC_LVR));
+#endif
+	}
 }
 
 static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 5910020c3f24..0633cfd0dc29 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -534,7 +534,7 @@ int setup_profiling_timer(unsigned int multiplier)
  */
 void clear_local_APIC(void)
 {
-	int maxlvt = lapic_get_maxlvt();
+	int maxlvt;
 	u32 v;
 
 	/* APIC hasn't been mapped yet */
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index af4a867a097c..777a7ff075de 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -245,7 +245,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
 	if ((ecx > 95) || (ecx == 0) || (eax < ebx))
 		return -EIO;
 
-	edx = (eax - ebx) / (100 - ecx);
+	edx = ((eax - ebx) * 100) / (100 - ecx);
 	*low_freq = edx * 1000; /* back to kHz */
 
 	dprintk("low frequency is %u kHz\n", *low_freq);
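
A note on the longrun fix above: with integer arithmetic, dividing before scaling silently truncates the result by a factor of 100. A standalone C sketch (not part of the commit; the register values are made up) shows the difference:

#include <stdio.h>

int main(void)
{
	/* hypothetical CPUID-derived values, for illustration only */
	unsigned int eax = 1300, ebx = 700, ecx = 50;

	unsigned int before = (eax - ebx) / (100 - ecx);	/* 600 / 50   = 12 */
	unsigned int after = ((eax - ebx) * 100) / (100 - ecx);	/* 60000 / 50 = 1200 */

	/* the old expression loses the factor of 100, so the subsequent
	 * '*low_freq = edx * 1000' reported a frequency 100x too small */
	printf("before=%u after=%u\n", before, after);
	return 0;
}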
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 46d4034d9f37..206791eb46e3 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1127,12 +1127,23 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	 * an UP version, and is deprecated by AMD.
 	 */
 	if (num_online_cpus() != 1) {
-		printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
+#ifndef CONFIG_ACPI_PROCESSOR
+		printk(KERN_ERR PFX "ACPI Processor support is required "
+		       "for SMP systems but is absent. Please load the "
+		       "ACPI Processor module before starting this "
+		       "driver.\n");
+#else
+		printk(KERN_ERR PFX "Your BIOS does not provide ACPI "
+		       "_PSS objects in a way that Linux understands. "
+		       "Please report this to the Linux ACPI maintainers"
+		       " and complain to your BIOS vendor.\n");
+#endif
 		kfree(data);
 		return -ENODEV;
 	}
 	if (pol->cpu != 0) {
-		printk(KERN_ERR PFX "No _PSS objects for CPU other than CPU0\n");
+		printk(KERN_ERR PFX "No ACPI _PSS objects for CPU other than "
+		       "CPU0. Complain to your BIOS vendor.\n");
 		kfree(data);
 		return -ENODEV;
 	}
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index e03cc952f233..eb9ddd8efb82 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -56,6 +56,11 @@ void __cpuinit mxcsr_feature_mask_init(void)
 
 void __init init_thread_xstate(void)
 {
+	if (!HAVE_HWFP) {
+		xstate_size = sizeof(struct i387_soft_struct);
+		return;
+	}
+
 	if (cpu_has_fxsr)
 		xstate_size = sizeof(struct i387_fxsave_struct);
 #ifdef CONFIG_X86_32
@@ -94,7 +99,7 @@ void __cpuinit fpu_init(void)
 int init_fpu(struct task_struct *tsk)
 {
 	if (tsk_used_math(tsk)) {
-		if (tsk == current)
+		if (HAVE_HWFP && tsk == current)
 			unlazy_fpu(tsk);
 		return 0;
 	}
@@ -109,6 +114,15 @@ int init_fpu(struct task_struct *tsk)
 		return -ENOMEM;
 	}
 
+#ifdef CONFIG_X86_32
+	if (!HAVE_HWFP) {
+		memset(tsk->thread.xstate, 0, xstate_size);
+		finit();
+		set_stopped_child_used_math(tsk);
+		return 0;
+	}
+#endif
+
 	if (cpu_has_fxsr) {
 		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
 
@@ -330,13 +344,13 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	if (!HAVE_HWFP)
-		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
-
 	ret = init_fpu(target);
 	if (ret)
 		return ret;
 
+	if (!HAVE_HWFP)
+		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
+
 	if (!cpu_has_fxsr) {
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 					   &target->thread.xstate->fsave, 0,
@@ -360,15 +374,15 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	if (!HAVE_HWFP)
-		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
-
 	ret = init_fpu(target);
 	if (ret)
 		return ret;
 
 	set_stopped_child_used_math(target);
 
+	if (!HAVE_HWFP)
+		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
+
 	if (!cpu_has_fxsr) {
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					  &target->thread.xstate->fsave, 0, -1);
@@ -474,18 +488,18 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
 int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
 {
 	int err;
+	struct task_struct *tsk = current;
 
-	if (HAVE_HWFP) {
-		struct task_struct *tsk = current;
-
+	if (HAVE_HWFP)
 		clear_fpu(tsk);
 
 	if (!used_math()) {
 		err = init_fpu(tsk);
 		if (err)
 			return err;
 	}
 
+	if (HAVE_HWFP) {
 		if (cpu_has_fxsr)
 			err = restore_i387_fxsave(buf);
 		else
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3d01e47777db..a4f93b4120c1 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -11,7 +11,6 @@
 #include <asm/desc.h>
 
 static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 4bc1be5d5472..08a30986d472 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -53,7 +53,7 @@ static cycle_t kvm_clock_read(void);
 * have elapsed since the hypervisor wrote the data. So we try to account for
 * that with system time
 */
-unsigned long kvm_get_wallclock(void)
+static unsigned long kvm_get_wallclock(void)
 {
 	u32 wc_sec, wc_nsec;
 	u64 delta;
@@ -86,7 +86,7 @@ unsigned long kvm_get_wallclock(void)
 	return ts.tv_sec + 1;
 }
 
-int kvm_set_wallclock(unsigned long now)
+static int kvm_set_wallclock(unsigned long now)
 {
 	return 0;
 }
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 1f99b62ff616..3710097f02eb 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -26,6 +26,7 @@
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
+#include <linux/sysdev.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -548,6 +549,28 @@ static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
 	return aper_base;
 }
 
+static int gart_resume(struct sys_device *dev)
+{
+	return 0;
+}
+
+static int gart_suspend(struct sys_device *dev, pm_message_t state)
+{
+	return -EINVAL;
+}
+
+static struct sysdev_class gart_sysdev_class = {
+	.name = "gart",
+	.suspend = gart_suspend,
+	.resume = gart_resume,
+
+};
+
+static struct sys_device device_gart = {
+	.id = 0,
+	.cls = &gart_sysdev_class,
+};
+
 /*
  * Private Northbridge GATT initialization in case we cannot use the
  * AGP driver for some reason.
@@ -558,7 +581,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	unsigned aper_base, new_aper_base;
 	struct pci_dev *dev;
 	void *gatt;
-	int i;
+	int i, error;
 
 	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
 	aper_size = aper_base = info->aper_size = 0;
@@ -595,6 +618,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 		dev = k8_northbridges[i];
 		enable_gart_translation(dev, __pa(gatt));
 	}
+
+	error = sysdev_class_register(&gart_sysdev_class);
+	if (!error)
+		error = sysdev_register(&device_gart);
+	if (error)
+		panic("Could not register gart_sysdev -- would corrupt data on next suspend");
 	flush_gart();
 
 	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 67e9b4a1e89d..ba370dc8685b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -99,15 +99,6 @@ static void mwait_idle(void)
 	local_irq_enable();
 }
 
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-	if (force_mwait)
-		return 1;
-	/* Any C1 states supported? */
-	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
 /*
  * On SMP it's slightly faster (but much more power-consuming!)
  * to poll the ->work.need_resched flag instead of waiting for the
@@ -119,6 +110,33 @@ static void poll_idle(void)
 	cpu_relax();
 }
 
+/*
+ * mwait selection logic:
+ *
+ * It depends on the CPU. For AMD CPUs that support MWAIT this is
+ * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
+ * then depend on a clock divisor and current Pstate of the core. If
+ * all cores of a processor are in halt state (C1) the processor can
+ * enter the C1E (C1 enhanced) state. If mwait is used this will never
+ * happen.
+ *
+ * idle=mwait overrides this decision and forces the usage of mwait.
+ */
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+	if (force_mwait)
+		return 1;
+
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		switch(c->x86) {
+		case 0x10:
+		case 0x11:
+			return 0;
+		}
+	}
+	return 1;
+}
+
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 	static int selected;
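
For readers who want to poke at the mwait_usable() decision from userspace, here is a rough stand-in (not kernel code; the simplified vendor check and the cpuid.h helper are assumptions of this sketch) that applies the same AMD family 0x10/0x11 test via CPUID:

#include <cpuid.h>
#include <stdio.h>

static int mwait_reasonable(void)
{
	unsigned int eax, ebx, ecx, edx, family;

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;
	/* "AuthenticAMD" puts 0x68747541 ("Auth") in EBX; checking only
	 * EBX is a simplification of the full three-register compare */
	if (ebx != 0x68747541)
		return 1;

	__get_cpuid(1, &eax, &ebx, &ecx, &edx);
	family = (eax >> 8) & 0xf;
	if (family == 0xf)		/* extended family encoding */
		family += (eax >> 20) & 0xff;

	/* families 0x10/0x11 reach C1 (and thus C1E) through HLT */
	return !(family == 0x10 || family == 0x11);
}

int main(void)
{
	printf("mwait reasonable here: %d\n", mwait_reasonable());
	return 0;
}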
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f8476dfbb60d..6d5483356e74 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -649,8 +649,11 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
+	 *
+	 * tsk_used_math() checks prevent calling math_state_restore(),
+	 * which can sleep in the case of !tsk_used_math()
 	 */
-	if (next_p->fpu_counter > 5)
+	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
 
 	/*
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e2319f39988b..ac54ff56df80 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -658,8 +658,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
+	 *
+	 * tsk_used_math() checks prevent calling math_state_restore(),
+	 * which can sleep in the case of !tsk_used_math()
 	 */
-	if (next_p->fpu_counter>5)
+	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
 	return prev_p;
 }
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 38988491c622..56078d61c793 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1190,6 +1190,7 @@ static void __init smp_cpu_index_default(void)
 */
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
+	preempt_disable();
 	nmi_watchdog_default();
 	smp_cpu_index_default();
 	current_cpu_data = boot_cpu_data;
@@ -1206,7 +1207,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");
 		disable_smp();
-		return;
+		goto out;
 	}
 
 	preempt_disable();
@@ -1246,6 +1247,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	printk(KERN_INFO "CPU%d: ", 0);
 	print_cpu_info(&cpu_data(0));
 	setup_boot_clock();
+out:
+	preempt_enable();
 }
 /*
 * Early setup to make printk work.
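
The smpboot change above is the usual bracket-and-funnel pattern: disable at entry, route every exit (including the sanity-check failure) through one label so the matching enable always runs. A tiny standalone sketch of the shape, with printf stand-ins for the kernel calls:

#include <stdio.h>

static void prepare(int sanity_ok)
{
	printf("preempt_disable\n");

	if (!sanity_ok) {
		printf("SMP disabled\n");
		goto out;	/* a bare return here would skip the enable */
	}

	printf("bring up secondary CPUs\n");
out:
	printf("preempt_enable\n");
}

int main(void)
{
	prepare(0);
	prepare(1);
	return 0;
}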
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e4790728b224..068759db63dd 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -14,7 +14,7 @@
 
 #include "mach_timer.h"
 
-static int tsc_enabled;
+static int tsc_disabled;
 
 /*
 * On some systems the TSC frequency does not
@@ -28,8 +28,8 @@ EXPORT_SYMBOL_GPL(tsc_khz);
 static int __init tsc_setup(char *str)
 {
 	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
 	       "cannot disable TSC completely.\n");
-	mark_tsc_unstable("user disabled TSC");
+	tsc_disabled = 1;
 	return 1;
 }
 #else
@@ -120,7 +120,7 @@ unsigned long long native_sched_clock(void)
 	 * very important for it to be as fast as the platform
 	 * can achive it. )
 	 */
-	if (unlikely(!tsc_enabled && !tsc_unstable))
+	if (unlikely(tsc_disabled))
 		/* No locking but a rare wrong value is not a big deal: */
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
@@ -322,7 +322,6 @@ void mark_tsc_unstable(char *reason)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
-		tsc_enabled = 0;
 		printk("Marking TSC unstable due to: %s.\n", reason);
 		/* Can be called before registration */
 		if (clocksource_tsc.mult)
@@ -336,7 +335,7 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
-		d->ident);
+	       d->ident);
 	tsc_unstable = 1;
 	return 0;
 }
@@ -403,14 +402,22 @@ void __init tsc_init(void)
 {
 	int cpu;
 
-	if (!cpu_has_tsc)
+	if (!cpu_has_tsc || tsc_disabled) {
+		/* Disable the TSC in case of !cpu_has_tsc */
+		tsc_disabled = 1;
 		return;
+	}
 
 	cpu_khz = calculate_cpu_khz();
 	tsc_khz = cpu_khz;
 
 	if (!cpu_khz) {
 		mark_tsc_unstable("could not calculate TSC khz");
+		/*
+		 * We need to disable the TSC completely in this case
+		 * to prevent sched_clock() from using it.
+		 */
+		tsc_disabled = 1;
 		return;
 	}
 
@@ -441,8 +448,6 @@ void __init tsc_init(void)
 	if (check_tsc_unstable()) {
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-	} else
-		tsc_enabled = 1;
-
+	}
 	clocksource_register(&clocksource_tsc);
 }
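
When tsc_disabled is set, native_sched_clock() above falls back to the jiffies-based estimate (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ). A quick standalone check of that conversion, assuming HZ=250 for the example:

#include <stdio.h>

#define HZ 250	/* assumed config value for this example */

int main(void)
{
	unsigned long long jiffies_elapsed = 1250;	/* made-up sample */
	unsigned long long ns = jiffies_elapsed * (1000000000ULL / HZ);

	/* each jiffy is worth 1e9/HZ = 4,000,000 ns here */
	printf("%llu jiffies ~ %llu ns (%llu ms)\n",
	       jiffies_elapsed, ns, ns / 1000000ULL);
	return 0;
}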
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index fcc16e58609e..1784b8077a12 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -227,14 +227,14 @@ void __init tsc_calibrate(void)
 	/* hpet or pmtimer available ? */
 	if (!hpet && !pm1 && !pm2) {
 		printk(KERN_INFO "TSC calibrated against PIT\n");
-		return;
+		goto out;
 	}
 
 	/* Check, whether the sampling was disturbed by an SMI */
 	if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) {
 		printk(KERN_WARNING "TSC calibration disturbed by SMI, "
 		       "using PIT calibration result\n");
-		return;
+		goto out;
 	}
 
 	tsc2 = (tsc2 - tsc1) * 1000000L;
@@ -255,6 +255,7 @@ void __init tsc_calibrate(void)
 
 	tsc_khz = tsc2 / tsc1;
 
+out:
 	for_each_possible_cpu(cpu)
 		set_cyc2ns_scale(tsc_khz, cpu);
 }
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 3324d90038e4..7c077a9d9777 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -216,7 +216,7 @@ int pit_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
 
-	if (pit && vcpu->vcpu_id == 0)
+	if (pit && vcpu->vcpu_id == 0 && pit->pit_state.inject_pending)
 		return atomic_read(&pit->pit_state.pit_timer.pending);
 
 	return 0;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 36809d79788b..c297c50eba63 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -957,7 +957,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *lapic = vcpu->arch.apic;
 
-	if (lapic)
+	if (lapic && apic_enabled(lapic) && apic_lvt_enabled(lapic, APIC_LVTT))
 		return atomic_read(&lapic->timer.pending);
 
 	return 0;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 36c5406b1813..7246b60afb96 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1996,7 +1996,7 @@ static struct shrinker mmu_shrinker = {
 	.seeks = DEFAULT_SEEKS * 10,
 };
 
-void mmu_destroy_caches(void)
+static void mmu_destroy_caches(void)
 {
 	if (pte_chain_cache)
 		kmem_cache_destroy(pte_chain_cache);
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index f2a696d6a243..8a96320ab071 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -677,8 +677,9 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 	c->use_modrm_ea = 1;
 
 	if (c->modrm_mod == 3) {
-		c->modrm_val = *(unsigned long *)
-			decode_register(c->modrm_rm, c->regs, c->d & ByteOp);
+		c->modrm_ptr = decode_register(c->modrm_rm,
+					       c->regs, c->d & ByteOp);
+		c->modrm_val = *(unsigned long *)c->modrm_ptr;
 		return rc;
 	}
 
@@ -1005,6 +1006,7 @@ done_prefixes:
 	if ((c->d & ModRM) && c->modrm_mod == 3) {
 		c->src.type = OP_REG;
 		c->src.val = c->modrm_val;
+		c->src.ptr = c->modrm_ptr;
 		break;
 	}
 	c->src.type = OP_MEM;
@@ -1049,6 +1051,7 @@ done_prefixes:
 	if ((c->d & ModRM) && c->modrm_mod == 3) {
 		c->dst.type = OP_REG;
 		c->dst.val = c->dst.orig_val = c->modrm_val;
+		c->dst.ptr = c->modrm_ptr;
 		break;
 	}
 	c->dst.type = OP_MEM;
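
The modrm_ptr change above caches where a register operand lives, not just its value, so a later write-back can reach the register. A simplified sketch of the decode/execute/write-back split (hypothetical types, not the kvm emulator structures):

#include <stdio.h>

struct operand {
	unsigned long val;	/* snapshot taken at decode time */
	unsigned long *ptr;	/* cached location for write-back */
};

int main(void)
{
	unsigned long regs[8] = { 0 };
	struct operand dst;

	/* decode: remember both the value and the register's address */
	dst.ptr = &regs[3];
	dst.val = *dst.ptr;

	/* execute: work on the snapshot */
	dst.val += 42;

	/* write back through the cached pointer; without it, results for
	 * register destinations have nowhere to land */
	*dst.ptr = dst.val;

	printf("regs[3] = %lu\n", regs[3]);
	return 0;
}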
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index af65b2da3ba0..5c7e2fd52075 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -582,8 +582,9 @@ static void __init lguest_init_IRQ(void)
 		int vector = FIRST_EXTERNAL_VECTOR + i;
 		if (vector != SYSCALL_VECTOR) {
 			set_intr_gate(vector, interrupt[i]);
-			set_irq_chip_and_handler(i, &lguest_irq_controller,
-						 handle_level_irq);
+			set_irq_chip_and_handler_name(i, &lguest_irq_controller,
+						      handle_level_irq,
+						      "level");
 		}
 	}
 	/* This call is required to set up for 4k stacks, where we have
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
index 4535e6d147ad..d710f2d167bb 100644
--- a/arch/x86/lib/delay_32.c
+++ b/arch/x86/lib/delay_32.c
@@ -44,13 +44,36 @@ static void delay_loop(unsigned long loops)
 static void delay_tsc(unsigned long loops)
 {
 	unsigned long bclock, now;
+	int cpu;
 
-	preempt_disable();		/* TSC's are per-cpu */
+	preempt_disable();
+	cpu = smp_processor_id();
 	rdtscl(bclock);
-	do {
-		rep_nop();
+	for (;;) {
 		rdtscl(now);
-	} while ((now-bclock) < loops);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
+	}
 	preempt_enable();
 }
 
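
The rebalancing arithmetic in the new delay loop is worth tracing once with concrete numbers. A standalone sketch (made-up tick values; TSCs on different CPUs need not be in sync, which is the whole point):

#include <stdio.h>

int main(void)
{
	unsigned long loops = 1000;	/* ticks we still owe the caller */
	unsigned long bclock = 5000;	/* baseline TSC on the old CPU */
	unsigned long now = 5400;	/* old CPU's TSC at migration time */

	loops -= now - bclock;		/* 400 ticks served, 600 remain */
	bclock = 90000;			/* fresh baseline from the new CPU */

	/* the loop now exits once (new_now - bclock) >= loops, i.e. after
	 * 600 more ticks, so at least the requested 1000 ticks elapse in
	 * total even though the two TSCs share no common origin */
	printf("remaining=%lu, new baseline=%lu\n", loops, bclock);
	return 0;
}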
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
index bbc610518516..4c441be92641 100644
--- a/arch/x86/lib/delay_64.c
+++ b/arch/x86/lib/delay_64.c
@@ -31,14 +31,36 @@ int __devinit read_current_timer(unsigned long *timer_value)
 void __delay(unsigned long loops)
 {
 	unsigned bclock, now;
+	int cpu;
 
-	preempt_disable();		/* TSC's are pre-cpu */
+	preempt_disable();
+	cpu = smp_processor_id();
 	rdtscl(bclock);
-	do {
-		rep_nop();
+	for (;;) {
 		rdtscl(now);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
 	}
-	while ((now-bclock) < loops);
 	preempt_enable();
 }
 EXPORT_SYMBOL(__delay);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 6e38d877ea77..c7b06feb139b 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -30,6 +30,7 @@
 #include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/user.h>
+#include <asm/i387.h>
 
 #include "fpu_system.h"
 #include "fpu_emu.h"
@@ -146,6 +147,13 @@ asmlinkage void math_emulate(long arg)
 	unsigned long code_limit = 0;	/* Initialized to stop compiler warnings */
 	struct desc_struct code_descriptor;
 
+	if (!used_math()) {
+		if (init_fpu(current)) {
+			do_group_exit(SIGKILL);
+			return;
+		}
+	}
+
 #ifdef RE_ENTRANT_CHECKING
 	if (emulating) {
 		printk("ERROR: wm-FPU-emu is not RE-ENTRANT!\n");
@@ -153,11 +161,6 @@ asmlinkage void math_emulate(long arg)
 	RE_ENTRANT_CHECK_ON;
 #endif /* RE_ENTRANT_CHECKING */
 
-	if (!used_math()) {
-		finit();
-		set_used_math();
-	}
-
 	SETUP_DATA_AREA(arg);
 
 	FPU_ORIG_EIP = FPU_EIP;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 32ba13b0f818..156e6d7b0e32 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -206,7 +206,7 @@ void __init cleanup_highmap(void)
 	pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
 	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
-		if (!pmd_present(*pmd))
+		if (pmd_none(*pmd))
 			continue;
 		if (vaddr < (unsigned long) _text || vaddr > end)
 			set_pmd(pmd, __pmd(0));
@@ -506,7 +506,7 @@ early_param("memtest", parse_memtest);
 
 static void __init early_memtest(unsigned long start, unsigned long end)
 {
-	unsigned long t_start, t_size;
+	u64 t_start, t_size;
 	unsigned pattern;
 
 	if (!memtest_pattern)
@@ -525,7 +525,7 @@ static void __init early_memtest(unsigned long start, unsigned long end)
 		if (t_start + t_size > end)
 			t_size = end - t_start;
 
-		printk(KERN_CONT "\n %016lx - %016lx pattern %d",
+		printk(KERN_CONT "\n %016llx - %016llx pattern %d",
 		       t_start, t_start + t_size, pattern);
 
 		memtest(t_start, t_size, pattern);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 71bb3159031a..2b2bb3f9b683 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -593,10 +593,11 @@ void __init early_iounmap(void *addr, unsigned long size)
 	unsigned long offset;
 	unsigned int nrpages;
 	enum fixed_addresses idx;
-	unsigned int nesting;
+	int nesting;
 
 	nesting = --early_ioremap_nested;
-	WARN_ON(nesting < 0);
+	if (WARN_ON(nesting < 0))
+		return;
 
 	if (early_ioremap_debug) {
 		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
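
Two things changed in early_iounmap() above: nesting was previously unsigned int, so 'nesting < 0' could never be true, and the WARN_ON() result was discarded. In the kernel, WARN_ON() evaluates to its condition, which is what lets it gate the early return. A userspace stand-in for that behaviour (the macro below is a mock built on a GCC statement expression, an assumption of this sketch):

#include <stdio.h>

#define WARN_ON(cond) ({						\
	int __ret = !!(cond);						\
	if (__ret)							\
		fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); \
	__ret;								\
})

static int early_ioremap_nested;	/* assumed starting state: 0 */

static void early_iounmap_sketch(void)
{
	int nesting = --early_ioremap_nested;	/* signed, so < 0 works */

	if (WARN_ON(nesting < 0))
		return;		/* unbalanced unmap: warn and bail out */
	printf("normal teardown\n");
}

int main(void)
{
	early_iounmap_sketch();	/* nesting goes to -1: warns, returns */
	return 0;
}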
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index bcb1a8e4b2db..06b7a1c90fb8 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -28,13 +28,13 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_wc_enabled = 1;
 
-void __init pat_disable(char *reason)
+void __cpuinit pat_disable(char *reason)
 {
 	pat_wc_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
 }
 
-static int nopat(char *str)
+static int __init nopat(char *str)
 {
 	pat_disable("PAT support disabled.");
 	return 0;
@@ -151,32 +151,33 @@ static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
 	unsigned long pat_type;
 	u8 mtrr_type;
 
-	mtrr_type = mtrr_type_lookup(start, end);
-	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
-		*ret_prot = prot;
-		return 0;
-	}
-	if (mtrr_type == 0xFE) {		/* MTRR match error */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
-	    mtrr_type != MTRR_TYPE_WRBACK &&
-	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-
 	pat_type = prot & _PAGE_CACHE_MASK;
 	prot &= (~_PAGE_CACHE_MASK);
 
-	/* Currently doing intersection by hand. Optimize it later. */
+	/*
+	 * We return the PAT request directly for types where PAT takes
+	 * precedence with respect to MTRR and for UC_MINUS.
+	 * Consistency checks with other PAT requests is done later
+	 * while going through memtype list.
+	 */
 	if (pat_type == _PAGE_CACHE_WC) {
 		*ret_prot = prot | _PAGE_CACHE_WC;
+		return 0;
 	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
 		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
-	} else if (pat_type == _PAGE_CACHE_UC ||
-		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
+		return 0;
+	} else if (pat_type == _PAGE_CACHE_UC) {
+		*ret_prot = prot | _PAGE_CACHE_UC;
+		return 0;
+	}
+
+	/*
+	 * Look for MTRR hint to get the effective type in case where PAT
+	 * request is for WB.
+	 */
+	mtrr_type = mtrr_type_lookup(start, end);
+
+	if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
 		*ret_prot = prot | _PAGE_CACHE_UC;
 	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
 		*ret_prot = prot | _PAGE_CACHE_WC;
@@ -233,14 +234,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	if (req_type == -1) {
 		/*
-		 * Special case where caller wants to inherit from mtrr or
-		 * existing pat mapping, defaulting to UC_MINUS in case of
-		 * no match.
+		 * Call mtrr_lookup to get the type hint. This is an
+		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
+		 * tools and ACPI tools). Use WB request for WB memory and use
+		 * UC_MINUS otherwise.
 		 */
 		u8 mtrr_type = mtrr_type_lookup(start, end);
-		if (mtrr_type == 0xFE) { /* MTRR match error */
-			err = -1;
-		}
 
 		if (mtrr_type == MTRR_TYPE_WRBACK) {
 			req_type = _PAGE_CACHE_WB;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 8545c8a9d107..6e64aaf00d1d 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -302,18 +302,18 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
 	},
 	{
 		.callback = set_bf_sort,
-		.ident = "HP ProLiant DL385 G2",
+		.ident = "HP ProLiant DL360",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
 		},
 	},
 	{
 		.callback = set_bf_sort,
-		.ident = "HP ProLiant DL585 G2",
+		.ident = "HP ProLiant DL380",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
 		},
 	},
 #ifdef __i386__
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c
index e70b9c57b88e..b821f4462d99 100644
--- a/arch/x86/pci/init.c
+++ b/arch/x86/pci/init.c
@@ -15,7 +15,8 @@ static __init int pci_access_init(void)
 	pci_mmcfg_early_init();
 
 #ifdef CONFIG_PCI_OLPC
-	pci_olpc_init();
+	if (!pci_olpc_init())
+		return 0;	/* skip additional checks if it's an XO */
 #endif
 #ifdef CONFIG_PCI_BIOS
 	pci_pcbios_init();
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 0908fca901bf..ca8df9c260bc 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -621,6 +621,13 @@ static __init int via_router_probe(struct irq_router *r,
 		 */
 		device = PCI_DEVICE_ID_VIA_8235;
 		break;
+	case PCI_DEVICE_ID_VIA_8237:
+		/**
+		 * Asus a7v600 bios wrongly reports 8237
+		 * as 586-compatible
+		 */
+		device = PCI_DEVICE_ID_VIA_8237;
+		break;
 	}
 }
 
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
index 5e7636558c02..e11e9e803d5f 100644
--- a/arch/x86/pci/olpc.c
+++ b/arch/x86/pci/olpc.c
@@ -302,12 +302,13 @@ static struct pci_raw_ops pci_olpc_conf = {
 	.write = pci_olpc_write,
 };
 
-void __init pci_olpc_init(void)
+int __init pci_olpc_init(void)
 {
 	if (!machine_is_olpc() || olpc_has_vsa())
-		return;
+		return -ENODEV;
 
 	printk(KERN_INFO "PCI: Using configuration type OLPC\n");
 	raw_pci_ops = &pci_olpc_conf;
 	is_lx = is_geode_lx();
+	return 0;
 }
diff --git a/arch/x86/pci/pci.h b/arch/x86/pci/pci.h
index f3972b12c60a..720c4c554534 100644
--- a/arch/x86/pci/pci.h
+++ b/arch/x86/pci/pci.h
@@ -101,7 +101,7 @@ extern struct pci_raw_ops pci_direct_conf1;
 extern int pci_direct_probe(void);
 extern void pci_direct_init(int type);
 extern void pci_pcbios_init(void);
-extern void pci_olpc_init(void);
+extern int pci_olpc_init(void);
 
 /* pci-mmconfig.c */
 
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 23476c2ebfc4..efa2ba7c6005 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -106,9 +106,9 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 	do_realtime((struct timespec *)tv);
 	tv->tv_usec /= 1000;
 	if (unlikely(tz != NULL)) {
-		/* This relies on gcc inlining the memcpy. We'll notice
-		   if it ever fails to do so. */
-		memcpy(tz, &gtod->sys_tz, sizeof(struct timezone));
+		/* Avoid memcpy. Some old compilers fail to inline it */
+		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
+		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
 	}
 	return 0;
 }
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 126766d43aea..3525ef523a74 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -60,7 +60,7 @@ xmaddr_t arbitrary_virt_to_machine(unsigned long address)
 {
 	unsigned int level;
 	pte_t *pte = lookup_address(address, &level);
-	unsigned offset = address & PAGE_MASK;
+	unsigned offset = address & ~PAGE_MASK;
 
 	BUG_ON(pte == NULL);
 
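
The one-character xen fix above is easy to misread: PAGE_MASK clears the low bits, so '& PAGE_MASK' yields the page base while '& ~PAGE_MASK' yields the offset within the page, which is what a virtual-to-machine translation needs. A quick check with the usual 4 KiB constants (assumed here):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long address = 0x12345678;

	printf("page base: %#lx\n", address & PAGE_MASK);	/* 0x12345000 */
	printf("offset   : %#lx\n", address & ~PAGE_MASK);	/* 0x678 */
	return 0;
}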