author    Ingo Molnar <mingo@elte.hu>  2008-06-16 05:24:00 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-06-16 05:24:00 -0400
commit    6d72b7952fa7d7c61d021398970c29afde6a4443 (patch)
tree      31c00be8e2837e2db2e62c694421a93a9f4c79d7 /arch/x86
parent    6360b1fbb4a939efd34fc770c2ebd927c55506e0 (diff)
parent    066519068ad2fbe98c7f45552b1f592903a9c8c8 (diff)
Merge branch 'linus' into core/rodata
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 24
-rw-r--r--  arch/x86/Kconfig.debug | 20
-rw-r--r--  arch/x86/boot/a20.c | 3
-rw-r--r--  arch/x86/boot/printf.c | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 16
-rw-r--r--  arch/x86/kernel/apic_64.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longrun.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 15
-rw-r--r--  arch/x86/kernel/entry_32.S | 1
-rw-r--r--  arch/x86/kernel/head_32.S | 2
-rw-r--r--  arch/x86/kernel/i387.c | 44
-rw-r--r--  arch/x86/kernel/io_apic_32.c | 12
-rw-r--r--  arch/x86/kernel/kvmclock.c | 4
-rw-r--r--  arch/x86/kernel/mfgpt_32.c | 2
-rw-r--r--  arch/x86/kernel/nmi_32.c | 9
-rw-r--r--  arch/x86/kernel/pci-dma.c | 14
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 31
-rw-r--r--  arch/x86/kernel/process_32.c | 5
-rw-r--r--  arch/x86/kernel/process_64.c | 5
-rw-r--r--  arch/x86/kernel/rtc.c | 34
-rw-r--r--  arch/x86/kernel/smpboot.c | 5
-rw-r--r--  arch/x86/kernel/traps_32.c | 1
-rw-r--r--  arch/x86/kernel/tsc_32.c | 25
-rw-r--r--  arch/x86/kernel/tsc_64.c | 5
-rw-r--r--  arch/x86/kvm/i8254.c | 14
-rw-r--r--  arch/x86/kvm/irq.c | 6
-rw-r--r--  arch/x86/kvm/irq.h | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 5
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 2
-rw-r--r--  arch/x86/kvm/svm.c | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 3
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  arch/x86/kvm/x86_emulate.c | 3
-rw-r--r--  arch/x86/lguest/boot.c | 5
-rw-r--r--  arch/x86/lib/delay_32.c | 31
-rw-r--r--  arch/x86/lib/delay_64.c | 30
-rw-r--r--  arch/x86/math-emu/fpu_entry.c | 13
-rw-r--r--  arch/x86/mm/fault.c | 5
-rw-r--r--  arch/x86/mm/init_64.c | 6
-rw-r--r--  arch/x86/mm/ioremap.c | 5
-rw-r--r--  arch/x86/mm/pat.c | 51
-rw-r--r--  arch/x86/mm/srat_64.c | 27
-rw-r--r--  arch/x86/pci/common.c | 8
-rw-r--r--  arch/x86/pci/init.c | 3
-rw-r--r--  arch/x86/pci/irq.c | 7
-rw-r--r--  arch/x86/pci/olpc.c | 5
-rw-r--r--  arch/x86/pci/pci.h | 2
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 6
-rw-r--r--  arch/x86/xen/mmu.c | 2
-rw-r--r--  arch/x86/xen/time.c | 13
50 files changed, 350 insertions, 191 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fe361ae7ef2f..52e18e6d2ba0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,17 +26,10 @@ config X86
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 
-config DEFCONFIG_LIST
+config ARCH_DEFCONFIG
 	string
-	depends on X86_32
-	option defconfig_list
-	default "arch/x86/configs/i386_defconfig"
-
-config DEFCONFIG_LIST
-	string
-	depends on X86_64
-	option defconfig_list
-	default "arch/x86/configs/x86_64_defconfig"
+	default "arch/x86/configs/i386_defconfig" if X86_32
+	default "arch/x86/configs/x86_64_defconfig" if X86_64
 
 
 config GENERIC_LOCKBREAK
@@ -1515,13 +1508,13 @@ config PCI_GOMMCONFIG
 config PCI_GODIRECT
 	bool "Direct"
 
-config PCI_GOANY
-	bool "Any"
-
 config PCI_GOOLPC
 	bool "OLPC"
 	depends on OLPC
 
+config PCI_GOANY
+	bool "Any"
+
 endchoice
 
 config PCI_BIOS
@@ -1538,9 +1531,8 @@ config PCI_MMCONFIG
 	depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
 
 config PCI_OLPC
-	bool
-	depends on PCI && PCI_GOOLPC
-	default y
+	def_bool y
+	depends on PCI && OLPC && (PCI_GOOLPC || PCI_GOANY)
 
 config PCI_DOMAINS
 	def_bool y
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index ac1e31ba4795..18363374d51a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -6,15 +6,19 @@ config TRACE_IRQFLAGS_SUPPORT
 source "lib/Kconfig.debug"
 
 config NONPROMISC_DEVMEM
-	bool "Disable promiscuous /dev/mem"
+	bool "Filter access to /dev/mem"
 	help
-	  The /dev/mem file by default only allows userspace access to PCI
-	  space and the BIOS code and data regions. This is sufficient for
-	  dosemu and X and all common users of /dev/mem. With this config
-	  option, you allow userspace access to all of memory, including
-	  kernel and userspace memory. Accidental access to this is
-	  obviously disasterous, but specific access can be used by people
-	  debugging the kernel.
+	  If this option is left off, you allow userspace access to all
+	  of memory, including kernel and userspace memory. Accidental
+	  access to this is obviously disastrous, but specific access can
+	  be used by people debugging the kernel.
+
+	  If this option is switched on, the /dev/mem file only allows
+	  userspace access to PCI space and the BIOS code and data regions.
+	  This is sufficient for dosemu and X and all common users of
+	  /dev/mem.
+
+	  If in doubt, say Y.
 
 config EARLY_PRINTK
 	bool "Early printk" if EMBEDDED
diff --git a/arch/x86/boot/a20.c b/arch/x86/boot/a20.c
index 90943f83e84d..e01aafd03bde 100644
--- a/arch/x86/boot/a20.c
+++ b/arch/x86/boot/a20.c
@@ -115,8 +115,6 @@ static void enable_a20_fast(void)
 
 int enable_a20(void)
 {
-	int loops = A20_ENABLE_LOOPS;
-
 #if defined(CONFIG_X86_ELAN)
 	/* Elan croaks if we try to touch the KBC */
 	enable_a20_fast();
@@ -128,6 +126,7 @@ int enable_a20(void)
 	enable_a20_kbc();
 	return 0;
 #else
+	int loops = A20_ENABLE_LOOPS;
 	while (loops--) {
 		/* First, check to see if A20 is already enabled
 		   (legacy free, etc.) */
diff --git a/arch/x86/boot/printf.c b/arch/x86/boot/printf.c
index c1d00c0274c4..50e47cdbdddd 100644
--- a/arch/x86/boot/printf.c
+++ b/arch/x86/boot/printf.c
@@ -56,7 +56,7 @@ static char *number(char *str, long num, int base, int size, int precision,
 	if (type & LEFT)
 		type &= ~ZEROPAD;
 	if (base < 2 || base > 36)
-		return 0;
+		return NULL;
 	c = (type & ZEROPAD) ? '0' : ' ';
 	sign = 0;
 	if (type & SIGN) {
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index c49ebcc6c41e..33c5216fd3e1 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -242,12 +242,19 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 
 static void __cpuinit acpi_register_lapic(int id, u8 enabled)
 {
+	unsigned int ver = 0;
+
 	if (!enabled) {
 		++disabled_cpus;
 		return;
 	}
 
-	generic_processor_info(id, 0);
+#ifdef CONFIG_X86_32
+	if (boot_cpu_physical_apicid != -1U)
+		ver = apic_version[boot_cpu_physical_apicid];
+#endif
+
+	generic_processor_info(id, ver);
 }
 
 static int __init
@@ -767,8 +774,13 @@ static void __init acpi_register_lapic_address(unsigned long address)
 	mp_lapic_addr = address;
 
 	set_fixmap_nocache(FIX_APIC_BASE, address);
-	if (boot_cpu_physical_apicid == -1U)
+	if (boot_cpu_physical_apicid == -1U) {
 		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+#ifdef CONFIG_X86_32
+		apic_version[boot_cpu_physical_apicid] =
+			 GET_APIC_VERSION(apic_read(APIC_LVR));
+#endif
+	}
 }
 
 static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 5910020c3f24..0633cfd0dc29 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -534,7 +534,7 @@ int setup_profiling_timer(unsigned int multiplier)
  */
 void clear_local_APIC(void)
 {
-	int maxlvt = lapic_get_maxlvt();
+	int maxlvt;
 	u32 v;
 
 	/* APIC hasn't been mapped yet */
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index af4a867a097c..777a7ff075de 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -245,7 +245,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
 	if ((ecx > 95) || (ecx == 0) || (eax < ebx))
 		return -EIO;
 
-	edx = (eax - ebx) / (100 - ecx);
+	edx = ((eax - ebx) * 100) / (100 - ecx);
 	*low_freq = edx * 1000;		/* back to kHz */
 
 	dprintk("low frequency is %u kHz\n", *low_freq);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 46d4034d9f37..206791eb46e3 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1127,12 +1127,23 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	 * an UP version, and is deprecated by AMD.
 	 */
 	if (num_online_cpus() != 1) {
-		printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
+#ifndef CONFIG_ACPI_PROCESSOR
+		printk(KERN_ERR PFX "ACPI Processor support is required "
+		       "for SMP systems but is absent. Please load the "
+		       "ACPI Processor module before starting this "
+		       "driver.\n");
+#else
+		printk(KERN_ERR PFX "Your BIOS does not provide ACPI "
+		       "_PSS objects in a way that Linux understands. "
+		       "Please report this to the Linux ACPI maintainers"
+		       " and complain to your BIOS vendor.\n");
+#endif
 		kfree(data);
 		return -ENODEV;
 	}
 	if (pol->cpu != 0) {
-		printk(KERN_ERR PFX "No _PSS objects for CPU other than CPU0\n");
+		printk(KERN_ERR PFX "No ACPI _PSS objects for CPU other than "
+		       "CPU0. Complain to your BIOS vendor.\n");
 		kfree(data);
 		return -ENODEV;
 	}
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 2a609dc3271c..c778e4fa55a2 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -248,6 +248,7 @@ ENTRY(resume_userspace)
 	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
+	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
 					# int/exception return?
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b2cc73768a9d..f7357cc0162c 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -189,7 +189,7 @@ default_entry:
  * this stage.
  */
 
-#define KPMDS ((0x100000000-__PAGE_OFFSET) >> 30) /* Number of kernel PMDs */
+#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
 
 	xorl %ebx,%ebx				/* %ebx is kept at zero */
 
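A quick sanity check of the new KPMDS arithmetic, as a hedged userspace sketch (not part of the patch; it assumes the 32-bit PAE layout, where four 1GB PMDs cover the 4GB address space and the kernel owns the slots above __PAGE_OFFSET):

    #include <stdio.h>
    #include <stdint.h>

    /* The patched formula: unsigned 32-bit negation wraps, so
     * -page_offset equals 0x100000000 - page_offset without writing
     * the 33-bit constant; "& 3" clamps the result to the 0..3 range
     * of PAE PMD slots. */
    static unsigned kpmds(uint32_t page_offset)
    {
            return ((-page_offset) >> 30) & 3;
    }

    int main(void)
    {
            printf("%u\n", kpmds(0xC0000000u)); /* 3G/1G split -> 1 kernel PMD  */
            printf("%u\n", kpmds(0x80000000u)); /* 2G/2G split -> 2 kernel PMDs */
            return 0;
    }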
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index e03cc952f233..eb9ddd8efb82 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -56,6 +56,11 @@ void __cpuinit mxcsr_feature_mask_init(void)
 
 void __init init_thread_xstate(void)
 {
+	if (!HAVE_HWFP) {
+		xstate_size = sizeof(struct i387_soft_struct);
+		return;
+	}
+
 	if (cpu_has_fxsr)
 		xstate_size = sizeof(struct i387_fxsave_struct);
 #ifdef CONFIG_X86_32
@@ -94,7 +99,7 @@ void __cpuinit fpu_init(void)
 int init_fpu(struct task_struct *tsk)
 {
 	if (tsk_used_math(tsk)) {
-		if (tsk == current)
+		if (HAVE_HWFP && tsk == current)
 			unlazy_fpu(tsk);
 		return 0;
 	}
@@ -109,6 +114,15 @@ int init_fpu(struct task_struct *tsk)
 		return -ENOMEM;
 	}
 
+#ifdef CONFIG_X86_32
+	if (!HAVE_HWFP) {
+		memset(tsk->thread.xstate, 0, xstate_size);
+		finit();
+		set_stopped_child_used_math(tsk);
+		return 0;
+	}
+#endif
+
 	if (cpu_has_fxsr) {
 		struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
 
@@ -330,13 +344,13 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	if (!HAVE_HWFP)
-		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
-
 	ret = init_fpu(target);
 	if (ret)
 		return ret;
 
+	if (!HAVE_HWFP)
+		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
+
 	if (!cpu_has_fxsr) {
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 					   &target->thread.xstate->fsave, 0,
@@ -360,15 +374,15 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	if (!HAVE_HWFP)
-		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
-
 	ret = init_fpu(target);
 	if (ret)
 		return ret;
 
 	set_stopped_child_used_math(target);
 
+	if (!HAVE_HWFP)
+		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
+
 	if (!cpu_has_fxsr) {
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					  &target->thread.xstate->fsave, 0, -1);
@@ -474,18 +488,18 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
 int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
 {
 	int err;
+	struct task_struct *tsk = current;
 
-	if (HAVE_HWFP) {
-		struct task_struct *tsk = current;
-
+	if (HAVE_HWFP)
 		clear_fpu(tsk);
 
 	if (!used_math()) {
 		err = init_fpu(tsk);
 		if (err)
 			return err;
 	}
 
+	if (HAVE_HWFP) {
 		if (cpu_has_fxsr)
 			err = restore_i387_fxsave(buf);
 		else
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index a40d54fc1fdd..4dc8600d9d20 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -2130,14 +2130,10 @@ static inline void __init check_timer(void)
 {
 	int apic1, pin1, apic2, pin2;
 	int vector;
-	unsigned int ver;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
-	ver = apic_read(APIC_LVR);
-	ver = GET_APIC_VERSION(ver);
-
 	/*
 	 * get/set the timer IRQ vector:
 	 */
@@ -2150,15 +2146,11 @@ static inline void __init check_timer(void)
 	 * mode for the 8259A whenever interrupts are routed
 	 * through I/O APICs. Also IRQ0 has to be enabled in
 	 * the 8259A which implies the virtual wire has to be
-	 * disabled in the local APIC. Finally timer interrupts
-	 * need to be acknowledged manually in the 8259A for
-	 * timer_interrupt() and for the i82489DX when using
-	 * the NMI watchdog.
+	 * disabled in the local APIC.
 	 */
 	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 	init_8259A(1);
-	timer_ack = !cpu_has_tsc;
-	timer_ack |= (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
+	timer_ack = 1;
 	if (timer_over_8254 > 0)
 		enable_8259A_irq(0);
 
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 4bc1be5d5472..08a30986d472 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -53,7 +53,7 @@ static cycle_t kvm_clock_read(void);
  * have elapsed since the hypervisor wrote the data. So we try to account for
  * that with system time
  */
-unsigned long kvm_get_wallclock(void)
+static unsigned long kvm_get_wallclock(void)
 {
 	u32 wc_sec, wc_nsec;
 	u64 delta;
@@ -86,7 +86,7 @@ unsigned long kvm_get_wallclock(void)
 	return ts.tv_sec + 1;
 }
 
-int kvm_set_wallclock(unsigned long now)
+static int kvm_set_wallclock(unsigned long now)
 {
 	return 0;
 }
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 3cad17fe026b..07c0f828f488 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -155,6 +155,7 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
 		wrmsr(msr, value, dummy);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event);
 
 int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable)
 {
@@ -222,6 +223,7 @@ int geode_mfgpt_alloc_timer(int timer, int domain)
 	/* No timers available - too bad */
 	return -1;
 }
+EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);
 
 
 #ifdef CONFIG_GEODE_MFGPT_TIMER
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index 11b14bbaa61e..84160f74eeb0 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -26,7 +26,6 @@
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
-#include <asm/timer.h>
 
 #include "mach_traps.h"
 
@@ -82,7 +81,7 @@ int __init check_nmi_watchdog(void)
 
 	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
 	if (!prev_nmi_count)
-		goto error;
+		return -1;
 
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
@@ -119,7 +118,7 @@ int __init check_nmi_watchdog(void)
 	if (!atomic_read(&nmi_active)) {
 		kfree(prev_nmi_count);
 		atomic_set(&nmi_active, -1);
-		goto error;
+		return -1;
 	}
 	printk("OK.\n");
 
@@ -130,10 +129,6 @@ int __init check_nmi_watchdog(void)
 
 	kfree(prev_nmi_count);
 	return 0;
-error:
-	timer_ack = !cpu_has_tsc;
-
-	return -1;
 }
 
 static int __init setup_nmi_watchdog(char *str)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index c5ef1af8e79d..dc00a1331ace 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -378,6 +378,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	struct page *page;
 	unsigned long dma_mask = 0;
 	dma_addr_t bus;
+	int noretry = 0;
 
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -397,20 +398,25 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (dev->dma_mask == NULL)
 		return NULL;
 
-	/* Don't invoke OOM killer */
-	gfp |= __GFP_NORETRY;
+	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
+	if (gfp & __GFP_DMA)
+		noretry = 1;
 
 #ifdef CONFIG_X86_64
 	/* Why <=? Even when the mask is smaller than 4GB it is often
 	   larger than 16MB and in this case we have a chance of
 	   finding fitting memory in the next higher zone first. If
 	   not retry with true GFP_DMA. -AK */
-	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
 		gfp |= GFP_DMA32;
+		if (dma_mask < DMA_32BIT_MASK)
+			noretry = 1;
+	}
 #endif
 
  again:
-	page = dma_alloc_pages(dev, gfp, get_order(size));
+	page = dma_alloc_pages(dev,
+			noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
 	if (page == NULL)
 		return NULL;
 
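The gfp decision above can be read as a small predicate. A hedged userspace restatement (not kernel code; want_noretry is a made-up name here, and the X86_64-only branch is folded in):

    #include <stdint.h>
    #include <stdbool.h>

    #define DMA_32BIT_MASK 0xffffffffULL   /* 4GB - 1, as in the kernel */

    /* Retries (and the OOM killer) are suppressed whenever a failed
     * allocation would end up hammering the tiny 16MB GFP_DMA zone:
     * either the caller asked for ZONE_DMA outright, or the device
     * mask is below 4GB so the GFP_DMA32 attempt may fall back there. */
    static bool want_noretry(bool gfp_dma, uint64_t dma_mask)
    {
            if (gfp_dma)
                    return true;
            return dma_mask < DMA_32BIT_MASK;
    }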
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index c07455d1695f..aa8ec928caa8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -26,6 +26,7 @@
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
+#include <linux/sysdev.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -548,6 +549,28 @@ static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
 	return aper_base;
 }
 
+static int gart_resume(struct sys_device *dev)
+{
+	return 0;
+}
+
+static int gart_suspend(struct sys_device *dev, pm_message_t state)
+{
+	return -EINVAL;
+}
+
+static struct sysdev_class gart_sysdev_class = {
+	.name = "gart",
+	.suspend = gart_suspend,
+	.resume = gart_resume,
+
+};
+
+static struct sys_device device_gart = {
+	.id	= 0,
+	.cls	= &gart_sysdev_class,
+};
+
 /*
  * Private Northbridge GATT initialization in case we cannot use the
  * AGP driver for some reason.
@@ -558,7 +581,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	unsigned aper_base, new_aper_base;
 	struct pci_dev *dev;
 	void *gatt;
-	int i;
+	int i, error;
 
 	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
 	aper_size = aper_base = info->aper_size = 0;
@@ -606,6 +629,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 		pci_write_config_dword(dev, 0x90, ctl);
 	}
+
+	error = sysdev_class_register(&gart_sysdev_class);
+	if (!error)
+		error = sysdev_register(&device_gart);
+	if (error)
+		panic("Could not register gart_sysdev -- would corrupt data on next suspend");
 	flush_gart();
 
 	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
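Why registering this sysdev blocks suspend: the sysdev suspend path aborts the transition when a ->suspend hook returns an error, so a hook that always returns -EINVAL keeps the machine from suspending while GART state still cannot be saved. A minimal model of that effect (illustrative only, signatures simplified):

    #include <stdio.h>
    #include <errno.h>

    /* stand-in for the kernel's ->suspend hook above */
    static int gart_suspend(void)
    {
            return -EINVAL; /* GART contents cannot be saved/restored yet */
    }

    static int try_suspend(void)
    {
            int error = gart_suspend();
            if (error) {
                    printf("suspend aborted: %d\n", error);
                    return error; /* machine stays up, GART stays intact */
            }
            /* ...power down would happen here... */
            return 0;
    }

    int main(void) { return try_suspend() ? 1 : 0; }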
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f8476dfbb60d..6d5483356e74 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -649,8 +649,11 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
+	 *
+	 * tsk_used_math() checks prevent calling math_state_restore(),
+	 * which can sleep in the case of !tsk_used_math()
 	 */
-	if (next_p->fpu_counter > 5)
+	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
 
 	/*
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e2319f39988b..ac54ff56df80 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -658,8 +658,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
+	 *
+	 * tsk_used_math() checks prevent calling math_state_restore(),
+	 * which can sleep in the case of !tsk_used_math()
 	 */
-	if (next_p->fpu_counter>5)
+	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
 	return prev_p;
 }
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 9615eee9b775..05191bbc68b8 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -4,6 +4,8 @@
 #include <linux/acpi.h>
 #include <linux/bcd.h>
 #include <linux/mc146818rtc.h>
+#include <linux/platform_device.h>
+#include <linux/pnp.h>
 
 #include <asm/time.h>
 #include <asm/vsyscall.h>
@@ -197,3 +199,35 @@ unsigned long long native_read_tsc(void)
 }
 EXPORT_SYMBOL(native_read_tsc);
 
+
+static struct resource rtc_resources[] = {
+	[0] = {
+		.start = RTC_PORT(0),
+		.end = RTC_PORT(1),
+		.flags = IORESOURCE_IO,
+	},
+	[1] = {
+		.start = RTC_IRQ,
+		.end = RTC_IRQ,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device rtc_device = {
+	.name = "rtc_cmos",
+	.id = -1,
+	.resource = rtc_resources,
+	.num_resources = ARRAY_SIZE(rtc_resources),
+};
+
+static __init int add_rtc_cmos(void)
+{
+#ifdef CONFIG_PNP
+	if (!pnp_platform_devices)
+		platform_device_register(&rtc_device);
+#else
+	platform_device_register(&rtc_device);
+#endif /* CONFIG_PNP */
+	return 0;
+}
+device_initcall(add_rtc_cmos);
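For orientation, the device registered above binds by name match to a platform driver called "rtc_cmos". A minimal, hypothetical consumer-side skeleton (not the real drivers/rtc/rtc-cmos.c; probe body elided):

    #include <linux/platform_device.h>
    #include <linux/module.h>

    static int __init my_cmos_probe(struct platform_device *pdev)
    {
            struct resource *ports =
                    platform_get_resource(pdev, IORESOURCE_IO, 0);
            int irq = platform_get_irq(pdev, 0);

            if (!ports || irq < 0)
                    return -ENODEV;
            /* request the I/O ports and register an RTC device here */
            return 0;
    }

    static struct platform_driver my_cmos_driver = {
            .driver = {
                    .name = "rtc_cmos", /* matches rtc_device.name above */
            },
    };

    static int __init my_cmos_init(void)
    {
            /* platform_driver_probe() suits non-hotpluggable devices */
            return platform_driver_probe(&my_cmos_driver, my_cmos_probe);
    }
    module_init(my_cmos_init);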
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 38988491c622..56078d61c793 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1190,6 +1190,7 @@ static void __init smp_cpu_index_default(void)
  */
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
+	preempt_disable();
 	nmi_watchdog_default();
 	smp_cpu_index_default();
 	current_cpu_data = boot_cpu_data;
@@ -1206,7 +1207,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");
 		disable_smp();
-		return;
+		goto out;
 	}
 
 	preempt_disable();
@@ -1246,6 +1247,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	printk(KERN_INFO "CPU%d: ", 0);
 	print_cpu_info(&cpu_data(0));
 	setup_boot_clock();
+out:
+	preempt_enable();
 }
 /*
  * Early setup to make printk work.
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index bde6f63e15d5..08d752de4eee 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -544,6 +544,7 @@ vm86_trap:
 #define DO_ERROR(trapnr, signr, str, name)				\
 void do_##name(struct pt_regs *regs, long error_code)			\
 {									\
+	trace_hardirqs_fixup();						\
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
 						== NOTIFY_STOP)		\
 		return;							\
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e4790728b224..068759db63dd 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -14,7 +14,7 @@
 
 #include "mach_timer.h"
 
-static int tsc_enabled;
+static int tsc_disabled;
 
 /*
  * On some systems the TSC frequency does not
@@ -28,8 +28,8 @@ EXPORT_SYMBOL_GPL(tsc_khz);
 static int __init tsc_setup(char *str)
 {
 	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
 	       "cannot disable TSC completely.\n");
-	mark_tsc_unstable("user disabled TSC");
+	tsc_disabled = 1;
 	return 1;
 }
 #else
@@ -120,7 +120,7 @@ unsigned long long native_sched_clock(void)
 	 * very important for it to be as fast as the platform
 	 * can achive it. )
 	 */
-	if (unlikely(!tsc_enabled && !tsc_unstable))
+	if (unlikely(tsc_disabled))
 		/* No locking but a rare wrong value is not a big deal: */
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
@@ -322,7 +322,6 @@ void mark_tsc_unstable(char *reason)
 {
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
-		tsc_enabled = 0;
 		printk("Marking TSC unstable due to: %s.\n", reason);
 		/* Can be called before registration */
 		if (clocksource_tsc.mult)
@@ -336,7 +335,7 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
-		d->ident);
+	       d->ident);
 	tsc_unstable = 1;
 	return 0;
 }
@@ -403,14 +402,22 @@ void __init tsc_init(void)
 {
 	int cpu;
 
-	if (!cpu_has_tsc)
+	if (!cpu_has_tsc || tsc_disabled) {
+		/* Disable the TSC in case of !cpu_has_tsc */
+		tsc_disabled = 1;
 		return;
+	}
 
 	cpu_khz = calculate_cpu_khz();
 	tsc_khz = cpu_khz;
 
 	if (!cpu_khz) {
 		mark_tsc_unstable("could not calculate TSC khz");
+		/*
+		 * We need to disable the TSC completely in this case
+		 * to prevent sched_clock() from using it.
+		 */
+		tsc_disabled = 1;
 		return;
 	}
 
@@ -441,8 +448,6 @@ void __init tsc_init(void)
 	if (check_tsc_unstable()) {
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-	} else
-		tsc_enabled = 1;
-
+	}
 	clocksource_register(&clocksource_tsc);
 }
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index fcc16e58609e..1784b8077a12 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -227,14 +227,14 @@ void __init tsc_calibrate(void)
 	/* hpet or pmtimer available ? */
 	if (!hpet && !pm1 && !pm2) {
 		printk(KERN_INFO "TSC calibrated against PIT\n");
-		return;
+		goto out;
 	}
 
 	/* Check, whether the sampling was disturbed by an SMI */
 	if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) {
 		printk(KERN_WARNING "TSC calibration disturbed by SMI, "
 		       "using PIT calibration result\n");
-		return;
+		goto out;
 	}
 
 	tsc2 = (tsc2 - tsc1) * 1000000L;
@@ -255,6 +255,7 @@ void __init tsc_calibrate(void)
 
 	tsc_khz = tsc2 / tsc1;
 
+out:
 	for_each_possible_cpu(cpu)
 		set_cyc2ns_scale(tsc_khz, cpu);
 }
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 7c077a9d9777..f2f5d260874e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -200,7 +200,6 @@ int __pit_timer_fn(struct kvm_kpit_state *ps)
 
 	atomic_inc(&pt->pending);
 	smp_mb__after_atomic_inc();
-	/* FIXME: handle case where the guest is in guest mode */
 	if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
 		vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		wake_up_interruptible(&vcpu0->wq);
@@ -237,6 +236,19 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 	return HRTIMER_NORESTART;
 }
 
+void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+	struct hrtimer *timer;
+
+	if (vcpu->vcpu_id != 0 || !pit)
+		return;
+
+	timer = &pit->pit_state.pit_timer.timer;
+	if (hrtimer_cancel(timer))
+		hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+}
+
 static void destroy_pit_timer(struct kvm_kpit_timer *pt)
 {
 	pr_debug("pit: execute del timer!\n");
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index ce1f583459b1..76d736b5f664 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -94,3 +94,9 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 	/* TODO: PIT, RTC etc. */
 }
 EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
+
+void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
+{
+	__kvm_migrate_apic_timer(vcpu);
+	__kvm_migrate_pit_timer(vcpu);
+}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 1802134b836f..2a15be2275c0 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -84,6 +84,8 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
+void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
+void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
 
 int pit_has_pending_timer(struct kvm_vcpu *vcpu);
 int apic_has_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 36c5406b1813..ee3f53098f0c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -658,7 +658,7 @@ static int is_empty_shadow_page(u64 *spt)
 	u64 *end;
 
 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-		if (*pos != shadow_trap_nonpresent_pte) {
+		if (is_shadow_present_pte(*pos)) {
 			printk(KERN_ERR "%s: %p %llx\n", __func__,
 			       pos, *pos);
 			return 0;
@@ -1858,6 +1858,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
+		cond_resched();
 	}
 	free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
@@ -1996,7 +1997,7 @@ static struct shrinker mmu_shrinker = {
 	.seeks = DEFAULT_SEEKS * 10,
 };
 
-void mmu_destroy_caches(void)
+static void mmu_destroy_caches(void)
 {
 	if (pte_chain_cache)
 		kmem_cache_destroy(pte_chain_cache);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 156fe10288ae..934c7b619396 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -418,7 +418,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	/* mmio */
 	if (is_error_pfn(pfn)) {
-		pgprintk("gfn %x is mmio\n", walker.gfn);
+		pgprintk("gfn %lx is mmio\n", walker.gfn);
 		kvm_release_pfn_clean(pfn);
 		return 1;
 	}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ab22615eee89..6b0d5fa5bab3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -688,7 +688,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		delta = vcpu->arch.host_tsc - tsc_this;
 		svm->vmcb->control.tsc_offset += delta;
 		vcpu->cpu = cpu;
-		kvm_migrate_apic_timer(vcpu);
+		kvm_migrate_timers(vcpu);
 	}
 
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bfe4db11989c..02efbe75f317 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -608,7 +608,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if (vcpu->cpu != cpu) {
 		vcpu_clear(vmx);
-		kvm_migrate_apic_timer(vcpu);
+		kvm_migrate_timers(vcpu);
 		vpid_sync_vcpu_all(vmx);
 	}
 
@@ -1036,6 +1036,7 @@ static void hardware_enable(void *garbage)
 static void hardware_disable(void *garbage)
 {
 	asm volatile (ASM_VMX_VMXOFF : : : "cc");
+	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21338bdb28ff..00acf1301a15 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2758,7 +2758,7 @@ again:
 
 	if (vcpu->requests) {
 		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
-			__kvm_migrate_apic_timer(vcpu);
+			__kvm_migrate_timers(vcpu);
 		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
 				       &vcpu->requests)) {
 			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 8a96320ab071..932f216d890c 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -1727,7 +1727,8 @@ twobyte_insn:
 		if (rc)
 			goto done;
 
-		kvm_emulate_hypercall(ctxt->vcpu);
+		/* Let the processor re-execute the fixed hypercall */
+		c->eip = ctxt->vcpu->arch.rip;
 		/* Disable writeback. */
 		c->dst.type = OP_NONE;
 		break;
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index af65b2da3ba0..5c7e2fd52075 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -582,8 +582,9 @@ static void __init lguest_init_IRQ(void)
 		int vector = FIRST_EXTERNAL_VECTOR + i;
 		if (vector != SYSCALL_VECTOR) {
 			set_intr_gate(vector, interrupt[i]);
-			set_irq_chip_and_handler(i, &lguest_irq_controller,
-						 handle_level_irq);
+			set_irq_chip_and_handler_name(i, &lguest_irq_controller,
+						      handle_level_irq,
+						      "level");
 		}
 	}
 	/* This call is required to set up for 4k stacks, where we have
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
index 4535e6d147ad..d710f2d167bb 100644
--- a/arch/x86/lib/delay_32.c
+++ b/arch/x86/lib/delay_32.c
@@ -44,13 +44,36 @@ static void delay_loop(unsigned long loops)
 static void delay_tsc(unsigned long loops)
 {
 	unsigned long bclock, now;
+	int cpu;
 
-	preempt_disable();		/* TSC's are per-cpu */
+	preempt_disable();
+	cpu = smp_processor_id();
 	rdtscl(bclock);
-	do {
-		rep_nop();
+	for (;;) {
 		rdtscl(now);
-	} while ((now-bclock) < loops);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
+	}
 	preempt_enable();
 }
 
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
index bbc610518516..4c441be92641 100644
--- a/arch/x86/lib/delay_64.c
+++ b/arch/x86/lib/delay_64.c
@@ -31,14 +31,36 @@ int __devinit read_current_timer(unsigned long *timer_value)
 void __delay(unsigned long loops)
 {
 	unsigned bclock, now;
+	int cpu;
 
-	preempt_disable();		/* TSC's are pre-cpu */
+	preempt_disable();
+	cpu = smp_processor_id();
 	rdtscl(bclock);
-	do {
-		rep_nop();
+	for (;;) {
 		rdtscl(now);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
 	}
-	while ((now-bclock) < loops);
 	preempt_enable();
 }
 EXPORT_SYMBOL(__delay);
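The rebalancing idea shared by both delay loops, restated as a hedged userspace analogue (illustrative only: sched_getcpu() and __rdtsc() stand in for smp_processor_id() and rdtscl(), and preemption control is omitted):

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <sched.h>
    #include <x86intrin.h>

    static void delay_tsc_cycles(uint64_t loops)
    {
            int cpu = sched_getcpu();
            uint64_t bclock = __rdtsc();

            for (;;) {
                    uint64_t now = __rdtsc();
                    if (now - bclock >= loops)
                            break;
                    if (cpu != sched_getcpu()) {
                            /* Moved to a CPU whose TSC is unrelated: credit
                             * the cycles already waited and restart, so the
                             * total is still "at least" the requested wait. */
                            loops -= now - bclock;
                            cpu = sched_getcpu();
                            bclock = __rdtsc();
                    }
            }
    }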
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 6e38d877ea77..c7b06feb139b 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -30,6 +30,7 @@
 #include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/user.h>
+#include <asm/i387.h>
 
 #include "fpu_system.h"
 #include "fpu_emu.h"
@@ -146,6 +147,13 @@ asmlinkage void math_emulate(long arg)
 	unsigned long code_limit = 0;	/* Initialized to stop compiler warnings */
 	struct desc_struct code_descriptor;
 
+	if (!used_math()) {
+		if (init_fpu(current)) {
+			do_group_exit(SIGKILL);
+			return;
+		}
+	}
+
 #ifdef RE_ENTRANT_CHECKING
 	if (emulating) {
 		printk("ERROR: wm-FPU-emu is not RE-ENTRANT!\n");
@@ -153,11 +161,6 @@ asmlinkage void math_emulate(long arg)
 	RE_ENTRANT_CHECK_ON;
 #endif /* RE_ENTRANT_CHECKING */
 
-	if (!used_math()) {
-		finit();
-		set_used_math();
-	}
-
 	SETUP_DATA_AREA(arg);
 
 	FPU_ORIG_EIP = FPU_EIP;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index fd7e1798c75a..8bcb6f40ccb6 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -497,6 +497,11 @@ static int vmalloc_fault(unsigned long address)
 	unsigned long pgd_paddr;
 	pmd_t *pmd_k;
 	pte_t *pte_k;
+
+	/* Make sure we are in vmalloc area */
+	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+		return -1;
+
 	/*
 	 * Synchronize this task's top level page-table
 	 * with the 'reference' page table.
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 32ba13b0f818..156e6d7b0e32 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -206,7 +206,7 @@ void __init cleanup_highmap(void)
 	pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
 	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
-		if (!pmd_present(*pmd))
+		if (pmd_none(*pmd))
 			continue;
 		if (vaddr < (unsigned long) _text || vaddr > end)
 			set_pmd(pmd, __pmd(0));
@@ -506,7 +506,7 @@ early_param("memtest", parse_memtest);
 
 static void __init early_memtest(unsigned long start, unsigned long end)
 {
-	unsigned long t_start, t_size;
+	u64 t_start, t_size;
 	unsigned pattern;
 
 	if (!memtest_pattern)
@@ -525,7 +525,7 @@ static void __init early_memtest(unsigned long start, unsigned long end)
 		if (t_start + t_size > end)
 			t_size = end - t_start;
 
-		printk(KERN_CONT "\n  %016lx - %016lx pattern %d",
+		printk(KERN_CONT "\n  %016llx - %016llx pattern %d",
 		       t_start, t_start + t_size, pattern);
 
 		memtest(t_start, t_size, pattern);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 71bb3159031a..2b2bb3f9b683 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -593,10 +593,11 @@ void __init early_iounmap(void *addr, unsigned long size)
 	unsigned long offset;
 	unsigned int nrpages;
 	enum fixed_addresses idx;
-	unsigned int nesting;
+	int nesting;
 
 	nesting = --early_ioremap_nested;
-	WARN_ON(nesting < 0);
+	if (WARN_ON(nesting < 0))
+		return;
 
 	if (early_ioremap_debug) {
 		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index de3a99812450..06b7a1c90fb8 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -34,7 +34,7 @@ void __cpuinit pat_disable(char *reason)
 	printk(KERN_INFO "%s\n", reason);
 }
 
-static int nopat(char *str)
+static int __init nopat(char *str)
 {
 	pat_disable("PAT support disabled.");
 	return 0;
@@ -151,32 +151,33 @@ static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
 	unsigned long pat_type;
 	u8 mtrr_type;
 
-	mtrr_type = mtrr_type_lookup(start, end);
-	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
-		*ret_prot = prot;
-		return 0;
-	}
-	if (mtrr_type == 0xFE) {		/* MTRR match error */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
-	    mtrr_type != MTRR_TYPE_WRBACK &&
-	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
-		*ret_prot = _PAGE_CACHE_UC;
-		return -1;
-	}
-
 	pat_type = prot & _PAGE_CACHE_MASK;
 	prot &= (~_PAGE_CACHE_MASK);
 
-	/* Currently doing intersection by hand. Optimize it later. */
+	/*
+	 * We return the PAT request directly for types where PAT takes
+	 * precedence with respect to MTRR and for UC_MINUS.
+	 * Consistency checks with other PAT requests is done later
+	 * while going through memtype list.
+	 */
 	if (pat_type == _PAGE_CACHE_WC) {
 		*ret_prot = prot | _PAGE_CACHE_WC;
+		return 0;
 	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
 		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
-	} else if (pat_type == _PAGE_CACHE_UC ||
-		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
+		return 0;
+	} else if (pat_type == _PAGE_CACHE_UC) {
+		*ret_prot = prot | _PAGE_CACHE_UC;
+		return 0;
+	}
+
+	/*
+	 * Look for MTRR hint to get the effective type in case where PAT
+	 * request is for WB.
+	 */
+	mtrr_type = mtrr_type_lookup(start, end);
+
+	if (mtrr_type == MTRR_TYPE_UNCACHABLE) {
 		*ret_prot = prot | _PAGE_CACHE_UC;
 	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
 		*ret_prot = prot | _PAGE_CACHE_WC;
@@ -233,14 +234,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	if (req_type == -1) {
 		/*
-		 * Special case where caller wants to inherit from mtrr or
-		 * existing pat mapping, defaulting to UC_MINUS in case of
-		 * no match.
+		 * Call mtrr_lookup to get the type hint. This is an
+		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
+		 * tools and ACPI tools). Use WB request for WB memory and use
+		 * UC_MINUS otherwise.
 		 */
 		u8 mtrr_type = mtrr_type_lookup(start, end);
-		if (mtrr_type == 0xFE) {	/* MTRR match error */
-			err = -1;
-		}
 
 		if (mtrr_type == MTRR_TYPE_WRBACK) {
 			req_type = _PAGE_CACHE_WB;
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 3890234e5b26..99649dccad28 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -97,36 +97,9 @@ static __init inline int srat_disabled(void)
 	return numa_off || acpi_numa < 0;
 }
 
-/*
- * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
- * up the NUMA heuristics which wants the local node to have a smaller
- * distance than the others.
- * Do some quick checks here and only use the SLIT if it passes.
- */
-static __init int slit_valid(struct acpi_table_slit *slit)
-{
-	int i, j;
-	int d = slit->locality_count;
-	for (i = 0; i < d; i++) {
-		for (j = 0; j < d; j++) {
-			u8 val = slit->entry[d*i + j];
-			if (i == j) {
-				if (val != LOCAL_DISTANCE)
-					return 0;
-			} else if (val <= LOCAL_DISTANCE)
-				return 0;
-		}
-	}
-	return 1;
-}
-
 /* Callback for SLIT parsing */
 void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
-	if (!slit_valid(slit)) {
-		printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
-		return;
-	}
 	acpi_slit = slit;
 }
 
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 6e64aaf00d1d..940185ecaeda 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -328,18 +328,18 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
 #endif
 	{
 		.callback = set_bf_sort,
-		.ident = "HP ProLiant DL385 G2",
+		.ident = "HP ProLiant DL360",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
 		},
 	},
 	{
 		.callback = set_bf_sort,
-		.ident = "HP ProLiant DL585 G2",
+		.ident = "HP ProLiant DL380",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
 		},
 	},
 	{}
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c
index e70b9c57b88e..b821f4462d99 100644
--- a/arch/x86/pci/init.c
+++ b/arch/x86/pci/init.c
@@ -15,7 +15,8 @@ static __init int pci_access_init(void)
 	pci_mmcfg_early_init();
 
 #ifdef CONFIG_PCI_OLPC
-	pci_olpc_init();
+	if (!pci_olpc_init())
+		return 0;	/* skip additional checks if it's an XO */
 #endif
 #ifdef CONFIG_PCI_BIOS
 	pci_pcbios_init();
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 0908fca901bf..ca8df9c260bc 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -621,6 +621,13 @@ static __init int via_router_probe(struct irq_router *r,
 		 */
 		device = PCI_DEVICE_ID_VIA_8235;
 		break;
+	case PCI_DEVICE_ID_VIA_8237:
+		/**
+		 * Asus a7v600 bios wrongly reports 8237
+		 * as 586-compatible
+		 */
+		device = PCI_DEVICE_ID_VIA_8237;
+		break;
 	}
 }
 
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
index 5e7636558c02..e11e9e803d5f 100644
--- a/arch/x86/pci/olpc.c
+++ b/arch/x86/pci/olpc.c
@@ -302,12 +302,13 @@ static struct pci_raw_ops pci_olpc_conf = {
 	.write	= pci_olpc_write,
 };
 
-void __init pci_olpc_init(void)
+int __init pci_olpc_init(void)
 {
 	if (!machine_is_olpc() || olpc_has_vsa())
-		return;
+		return -ENODEV;
 
 	printk(KERN_INFO "PCI: Using configuration type OLPC\n");
 	raw_pci_ops = &pci_olpc_conf;
 	is_lx = is_geode_lx();
+	return 0;
 }
diff --git a/arch/x86/pci/pci.h b/arch/x86/pci/pci.h
index f3972b12c60a..720c4c554534 100644
--- a/arch/x86/pci/pci.h
+++ b/arch/x86/pci/pci.h
@@ -101,7 +101,7 @@ extern struct pci_raw_ops pci_direct_conf1;
 extern int pci_direct_probe(void);
 extern void pci_direct_init(int type);
 extern void pci_pcbios_init(void);
-extern void pci_olpc_init(void);
+extern int pci_olpc_init(void);
 
 /* pci-mmconfig.c */
 
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 23476c2ebfc4..efa2ba7c6005 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -106,9 +106,9 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 		do_realtime((struct timespec *)tv);
 		tv->tv_usec /= 1000;
 		if (unlikely(tz != NULL)) {
-			/* This relies on gcc inlining the memcpy. We'll notice
-			   if it ever fails to do so. */
-			memcpy(tz, &gtod->sys_tz, sizeof(struct timezone));
+			/* Avoid memcpy. Some old compilers fail to inline it */
+			tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
+			tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
 		}
 		return 0;
 	}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 126766d43aea..3525ef523a74 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -60,7 +60,7 @@ xmaddr_t arbitrary_virt_to_machine(unsigned long address)
 {
 	unsigned int level;
 	pte_t *pte = lookup_address(address, &level);
-	unsigned offset = address & PAGE_MASK;
+	unsigned offset = address & ~PAGE_MASK;
 
 	BUG_ON(pte == NULL);
 
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c39e1a5aa241..52b2e3856980 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -12,6 +12,7 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/kernel_stat.h>
+#include <linux/math64.h>
 
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
@@ -150,11 +151,7 @@ static void do_stolen_accounting(void)
 	if (stolen < 0)
 		stolen = 0;
 
-	ticks = 0;
-	while (stolen >= NS_PER_TICK) {
-		ticks++;
-		stolen -= NS_PER_TICK;
-	}
+	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__get_cpu_var(residual_stolen) = stolen;
 	account_steal_time(NULL, ticks);
 
@@ -166,11 +163,7 @@ static void do_stolen_accounting(void)
 	if (blocked < 0)
 		blocked = 0;
 
-	ticks = 0;
-	while (blocked >= NS_PER_TICK) {
-		ticks++;
-		blocked -= NS_PER_TICK;
-	}
+	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
 	__get_cpu_var(residual_blocked) = blocked;
 	account_steal_time(idle_task(smp_processor_id()), ticks);
 }
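The helper used above packages the removed open-coded loops. A hedged restatement of its contract (the my_ prefix marks this as a local sketch, not the kernel's linux/math64.h source): it returns the quotient and stores the remainder back through the pointer, and the iterative form is cheap precisely because only a few ticks of stolen or blocked time accumulate per call.

    #include <stdint.h>

    static uint32_t my_iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
                                        uint64_t *remainder)
    {
            uint32_t quot = 0;

            while (dividend >= divisor) {   /* same loop the patch removed */
                    dividend -= divisor;
                    quot++;
            }
            *remainder = dividend;
            return quot;
    }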