Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                                  | 13
-rw-r--r--  arch/x86/boot/compressed/relocs.c                 |  7
-rw-r--r--  arch/x86/boot/memory.c                            | 29
-rw-r--r--  arch/x86/include/asm/paravirt.h                   |  2
-rw-r--r--  arch/x86/include/asm/percpu.h                     | 10
-rw-r--r--  arch/x86/include/asm/ptrace.h                     |  7
-rw-r--r--  arch/x86/include/asm/spinlock.h                   |  4
-rw-r--r--  arch/x86/kernel/Makefile                          |  3
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c                  |  8
-rw-r--r--  arch/x86/kernel/cpu/common.c                      |  7
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c        |  6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c         |  1
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c         |  4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c         | 57
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c  |  2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c               |  2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c                |  6
-rw-r--r--  arch/x86/kernel/ftrace.c                          |  2
-rw-r--r--  arch/x86/kernel/kgdb.c                            |  3
-rw-r--r--  arch/x86/kernel/paravirt.c                        |  2
-rw-r--r--  arch/x86/kernel/reboot.c                          |  8
-rw-r--r--  arch/x86/kernel/setup_percpu.c                    |  4
-rw-r--r--  arch/x86/kernel/tlb_uv.c                          |  2
-rw-r--r--  arch/x86/kvm/mmu.c                                |  3
-rw-r--r--  arch/x86/kvm/svm.c                                |  8
-rw-r--r--  arch/x86/kvm/x86.c                                | 12
-rw-r--r--  arch/x86/lguest/Makefile                          |  1
-rw-r--r--  arch/x86/lguest/boot.c                            | 17
-rw-r--r--  arch/x86/mm/hugetlbpage.c                         |  6
-rw-r--r--  arch/x86/mm/pageattr.c                            | 13
-rw-r--r--  arch/x86/oprofile/backtrace.c                     |  2
-rw-r--r--  arch/x86/pci/mmconfig-shared.c                    |  6
-rw-r--r--  arch/x86/xen/Makefile                             |  5
-rw-r--r--  arch/x86/xen/mmu.c                                |  1
-rw-r--r--  arch/x86/xen/xen-ops.h                            | 19
35 files changed, 181 insertions(+), 101 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index df9e885eee14..a6efe0a2e9ae 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -498,6 +498,19 @@ config PARAVIRT
 	  over full virtualization.  However, when run without a hypervisor
 	  the kernel is theoretically slower and slightly larger.
 
+config PARAVIRT_SPINLOCKS
+	bool "Paravirtualization layer for spinlocks"
+	depends on PARAVIRT && SMP && EXPERIMENTAL
+	---help---
+	  Paravirtualized spinlocks allow a pvops backend to replace the
+	  spinlock implementation with something virtualization-friendly
+	  (for example, block the virtual CPU rather than spinning).
+
+	  Unfortunately the downside is an up to 5% performance hit on
+	  native kernels, with various workloads.
+
+	  If you are unsure how to answer this question, answer N.
+
 config PARAVIRT_CLOCK
 	bool
 	default n
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index 857e492c571e..bbeb0c3fbd90 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -504,8 +504,11 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
 		if (sym->st_shndx == SHN_ABS) {
 			continue;
 		}
-		if (r_type == R_386_PC32) {
-			/* PC relative relocations don't need to be adjusted */
+		if (r_type == R_386_NONE || r_type == R_386_PC32) {
+			/*
+			 * NONE can be ignored and PC relative
+			 * relocations don't need to be adjusted.
+			 */
 		}
 		else if (r_type == R_386_32) {
 			/* Visit relocations that need to be adjusted */
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index 5054c2ddd1a0..74b3d2ba84e9 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -17,11 +17,6 @@
 
 #define SMAP	0x534d4150	/* ASCII "SMAP" */
 
-struct e820_ext_entry {
-	struct e820entry std;
-	u32 ext_flags;
-} __attribute__((packed));
-
 static int detect_memory_e820(void)
 {
 	int count = 0;
@@ -29,13 +24,21 @@ static int detect_memory_e820(void)
 	u32 size, id, edi;
 	u8 err;
 	struct e820entry *desc = boot_params.e820_map;
-	static struct e820_ext_entry buf; /* static so it is zeroed */
+	static struct e820entry buf; /* static so it is zeroed */
 
 	/*
-	 * Set this here so that if the BIOS doesn't change this field
-	 * but still doesn't change %ecx, we're still okay...
+	 * Note: at least one BIOS is known which assumes that the
+	 * buffer pointed to by one e820 call is the same one as
+	 * the previous call, and only changes modified fields.  Therefore,
+	 * we use a temporary buffer and copy the results entry by entry.
+	 *
+	 * This routine deliberately does not try to account for
+	 * ACPI 3+ extended attributes.  This is because there are
+	 * BIOSes in the field which report zero for the valid bit for
+	 * all ranges, and we don't currently make any use of the
+	 * other attribute bits.  Revisit this if we see the extended
+	 * attribute bits deployed in a meaningful way in the future.
 	 */
-	buf.ext_flags = 1;
 
 	do {
 		size = sizeof buf;
@@ -66,13 +69,7 @@ static int detect_memory_e820(void)
 			break;
 		}
 
-		/* ACPI 3.0 added the extended flags support.  If bit 0
-		   in the extended flags is zero, we're supposed to simply
-		   ignore the entry -- a backwards incompatible change! */
-		if (size > 20 && !(buf.ext_flags & 1))
-			continue;
-
-		*desc++ = buf.std;
+		*desc++ = buf;
 		count++;
 	} while (next && count < ARRAY_SIZE(boot_params.e820_map));
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 378e3691c08c..a53da004e08e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1443,7 +1443,7 @@ u64 _paravirt_ident_64(u64);
 
 #define paravirt_nop	((void *)_paravirt_nop)
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
 {
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b26d01..02ecb30982a3 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -82,22 +82,22 @@ do { \
 	case 1:						\
 		asm(op "b %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)val));			\
+		    : "qi" ((T__)(val)));		\
 		break;					\
 	case 2:						\
 		asm(op "w %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)val));			\
+		    : "ri" ((T__)(val)));		\
 		break;					\
 	case 4:						\
 		asm(op "l %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)val));			\
+		    : "ri" ((T__)(val)));		\
 		break;					\
 	case 8:						\
 		asm(op "q %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "re" ((T__)val));			\
+		    : "re" ((T__)(val)));		\
 		break;					\
 	default: __bad_percpu_size();			\
 	}						\
@@ -109,7 +109,7 @@ do { \
 	switch (sizeof(var)) {				\
 	case 1:						\
 		asm(op "b "__percpu_arg(1)",%0"		\
-		    : "=r" (ret__)			\
+		    : "=q" (ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 2:						\
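
The switch from "r" to "q" in the byte-sized cases above is the substantive fix: in 32-bit mode only %eax, %ebx, %ecx and %edx have byte subregisters, so an "r" operand may land in %esi or %edi, which cannot encode a movb operand. A minimal user-space sketch of the constraint (illustrative code, not from this patch):

#include <stdio.h>

static unsigned char cell;

static void set_byte(unsigned char val)
{
	/* "q" restricts the register choice to %eax/%ebx/%ecx/%edx, the
	 * only i386 registers with byte subregisters; "i" still allows an
	 * immediate, matching the "qi" constraint used above. */
	asm("movb %1, %0"
	    : "+m" (cell)
	    : "qi" (val));
}

int main(void)
{
	set_byte(0x5a);
	printf("cell = 0x%02x\n", cell);	/* cell = 0x5a */
	return 0;
}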
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index e304b66abeea..624f133943ed 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -187,14 +187,15 @@ static inline int v8086_mode(struct pt_regs *regs)
 
 /*
  * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps.  So regs will be the current sp.
+ * when it traps.  The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved.  Thus the '&regs->sp'.
  *
  * This is valid only for kernel mode traps.
  */
-static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-	return (unsigned long)regs;
+	return (unsigned long)(&regs->sp);
 #else
 	return regs->sp;
 #endif
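
A user-space sketch of why '&regs->sp' is the right answer on 32-bit (the struct below abbreviates the real pt_regs and is only for illustration): on a same-privilege trap the CPU pushes flags/cs/ip but not sp/ss, so the interrupted stack begins exactly where the unsaved 'sp' slot would sit.

#include <stdio.h>
#include <stddef.h>

struct fake_pt_regs {
	unsigned long bx, cx, dx, si, di, bp, ax;
	unsigned long ds, es, fs, gs;
	unsigned long orig_ax, ip, cs, flags;
	unsigned long sp, ss;	/* not written by kernel-mode traps */
};

int main(void)
{
	/* The saved frame ends right before 'sp'; the pre-trap stack
	 * therefore starts at its address, which is what the renamed
	 * kernel_stack_pointer() returns. */
	printf("old stack = regs + %zu bytes\n",
	       offsetof(struct fake_pt_regs, sp));
	return 0;
}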
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index e5e6caffec87..b7e5db876399 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,7 +172,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
@@ -206,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 	__raw_spin_lock(lock);
 }
 
-#endif
+#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 145cce75cda7..88d1bfc847d3 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -89,7 +89,8 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
 obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST)		+= kvm.o
 obj-$(CONFIG_KVM_CLOCK)		+= kvmclock.o
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 1c11b819f245..302947775575 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -254,7 +254,7 @@ static int parse_unisys_oem(char *oemptr)
 }
 
 #ifdef CONFIG_ACPI
-static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
+static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
 	struct acpi_table_header *header = NULL;
 	struct es7000_oem_table *table;
@@ -285,7 +285,7 @@ static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
 	return 0;
 }
 
-static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
+static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
 	if (!oem_addr)
 		return;
@@ -306,7 +306,7 @@ static int es7000_check_dsdt(void)
 static int es7000_acpi_ret;
 
 /* Hook from generic ACPI tables.c */
-static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	unsigned long oem_addr = 0;
 	int check_dsdt;
@@ -717,7 +717,7 @@ struct apic apic_es7000_cluster = {
 	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
 };
 
-struct apic apic_es7000 = {
+struct apic __refdata apic_es7000 = {
 
 	.name				= "es7000",
 	.probe				= probe_es7000,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1caefc82e62..77848d9fca68 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -114,6 +114,13 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
+static int __init x86_xsave_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+	return 1;
+}
+__setup("noxsave", x86_xsave_setup);
+
 #ifdef CONFIG_X86_32
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 208ecf6643df..752e8c6b2c7e 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -550,7 +550,7 @@ static int __init acpi_cpufreq_early_init(void)
 		return -ENOMEM;
 	}
 	for_each_possible_cpu(i) {
-		if (!alloc_cpumask_var_node(
+		if (!zalloc_cpumask_var_node(
 			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
 			GFP_KERNEL, cpu_to_node(i))) {
 
@@ -693,8 +693,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
 	    policy->cpuinfo.transition_latency > 20 * 1000) {
 		policy->cpuinfo.transition_latency = 20 * 1000;
-		printk_once(KERN_INFO "Capping off P-state tranision"
-			    " latency at 20 uS\n");
+		printk_once(KERN_INFO
+			    "P-state transition latency capped at 20 uS\n");
 	}
 
 	/* table init */
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 6ac55bd341ae..869615193720 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -168,6 +168,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 	case 0x0E: /* Core */
 	case 0x0F: /* Core Duo */
 	case 0x16: /* Celeron Core */
+	case 0x1C: /* Atom */
 		p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
 		return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
 	case 0x0D: /* Pentium M (Dothan) */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 3c28ccd49742..d47c775eb0ab 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -168,10 +168,12 @@ static int check_powernow(void)
 	return 1;
 }
 
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
 static void invalidate_entry(unsigned int entry)
 {
 	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
 }
+#endif
 
 static int get_ranges(unsigned char *pst)
 {
@@ -320,7 +322,7 @@ static int powernow_acpi_init(void)
 		goto err0;
 	}
 
-	if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+	if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
 								GFP_KERNEL)) {
 		retval = -ENOMEM;
 		goto err05;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 4709ead2db52..cf52215d9eb1 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -649,6 +649,20 @@ static void print_basics(struct powernow_k8_data *data)
 		data->batps);
 }
 
+static u32 freq_from_fid_did(u32 fid, u32 did)
+{
+	u32 mhz = 0;
+
+	if (boot_cpu_data.x86 == 0x10)
+		mhz = (100 * (fid + 0x10)) >> did;
+	else if (boot_cpu_data.x86 == 0x11)
+		mhz = (100 * (fid + 8)) >> did;
+	else
+		BUG();
+
+	return mhz * 1000;
+}
+
 static int fill_powernow_table(struct powernow_k8_data *data,
 		struct pst_s *pst, u8 maxvid)
 {
@@ -821,7 +835,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
 	struct cpufreq_frequency_table *powernow_table;
 	int ret_val = -ENODEV;
-	acpi_integer space_id;
+	acpi_integer control, status;
 
 	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
 		dprintk("register performance failed: bad ACPI data\n");
@@ -834,12 +848,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 		goto err_out;
 	}
 
-	space_id = data->acpi_data.control_register.space_id;
-	if ((space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
-	    (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+	control = data->acpi_data.control_register.space_id;
+	status = data->acpi_data.status_register.space_id;
+
+	if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+	    (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
 		dprintk("Invalid control/status registers (%x - %x)\n",
-			data->acpi_data.control_register.space_id,
-			space_id);
+			control, status);
 		goto err_out;
 	}
 
@@ -872,7 +887,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
-	if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
 		printk(KERN_ERR PFX
 				"unable to alloc powernow_k8_data cpumask\n");
 		ret_val = -ENOMEM;
@@ -923,8 +938,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 
 		powernow_table[i].index = index;
 
-		powernow_table[i].frequency =
-			data->acpi_data.states[i].core_frequency * 1000;
+		/* Frequency may be rounded for these */
+		if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
+			powernow_table[i].frequency =
+				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
+		} else
+			powernow_table[i].frequency =
+				data->acpi_data.states[i].core_frequency * 1000;
 	}
 	return 0;
 }
@@ -1215,13 +1235,16 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 	return cpufreq_frequency_table_verify(pol, data->powernow_table);
 }
 
+static const char ACPI_PSS_BIOS_BUG_MSG[] =
+	KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+	KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask;
 	int rc;
-	static int print_once;
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
@@ -1244,19 +1267,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	 * an UP version, and is deprecated by AMD.
 	 */
 	if (num_online_cpus() != 1) {
-		/*
-		 * Replace this one with print_once as soon as such a
-		 * thing gets introduced
-		 */
-		if (!print_once) {
-			WARN_ONCE(1, KERN_ERR FW_BUG PFX "Your BIOS "
-				"does not provide ACPI _PSS objects "
-				"in a way that Linux understands. "
-				"Please report this to the Linux ACPI"
-				" maintainers and complain to your "
-				"BIOS vendor.\n");
-			print_once++;
-		}
+		printk_once(ACPI_PSS_BIOS_BUG_MSG);
 		goto err_out;
 	}
 	if (pol->cpu != 0) {
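
The new freq_from_fid_did() helper is plain arithmetic and easy to sanity-check outside the kernel; the fid/did values below are made up for illustration:

#include <stdio.h>

static unsigned freq_khz(int family, unsigned fid, unsigned did)
{
	unsigned mhz = 0;

	if (family == 0x10)			/* family 10h */
		mhz = (100 * (fid + 0x10)) >> did;
	else if (family == 0x11)		/* family 11h */
		mhz = (100 * (fid + 8)) >> did;
	return mhz * 1000;
}

int main(void)
{
	/* family 10h, fid 2, did 0: 100 * (2 + 16) = 1800 MHz */
	printf("%u kHz\n", freq_khz(0x10, 2, 0));	/* 1800000 */
	/* the same P-state with divisor id 1 halves the frequency */
	printf("%u kHz\n", freq_khz(0x10, 2, 1));	/*  900000 */
	/* family 11h, fid 8, did 0: 100 * (8 + 8) = 1600 MHz */
	printf("%u kHz\n", freq_khz(0x11, 8, 0));	/* 1600000 */
	return 0;
}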
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index c9f1fdc02830..55c831ed71ce 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -471,7 +471,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 
 	if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
 		return -ENOMEM;
-	if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
+	if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
 		free_cpumask_var(saved_mask);
 		return -ENOMEM;
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 6fb0b359d2a5..09dd1d414fc3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -1163,7 +1163,7 @@ static __init int mce_init_device(void)
 	if (!mce_available(&boot_cpu_data))
 		return -EIO;
 
-	alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
+	zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
 
 	err = mce_init_banks();
 	if (err)
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0b776c09aff3..d21d4fb161f7 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -275,7 +275,11 @@ static void __init print_mtrr_state(void)
 	}
 	printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
 	       mtrr_state.enabled & 2 ? "en" : "dis");
-	high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
+	if (size_or_mask & 0xffffffffUL)
+		high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
+	else
+		high_width = ffs(size_or_mask>>32) + 32 - 1;
+	high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
 	for (i = 0; i < num_var_ranges; ++i) {
 		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
 			printk(KERN_DEBUG "  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
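
The old one-liner passed the full 64-bit size_or_mask to ffs(), which only looks at the low 32 bits, so machines whose mask clears 32 or more low bits (more than 44 physical address bits) computed a bogus width. A standalone check of the replacement logic, assuming PAGE_SHIFT is 12 and that size_or_mask clears the low (phys_bits - PAGE_SHIFT) bits as in the kernel:

#include <stdio.h>

#define PAGE_SHIFT 12

static int high_hex_digits(unsigned long long size_or_mask)
{
	int high_width;

	if (size_or_mask & 0xffffffffULL)
		high_width = __builtin_ffs((unsigned)size_or_mask) - 1;
	else
		high_width = __builtin_ffs((unsigned)(size_or_mask >> 32))
			     + 32 - 1;
	/* hex digits needed above the low 32-bit page-frame part */
	return (high_width - (32 - PAGE_SHIFT) + 3) / 4;
}

int main(void)
{
	/* 36 physical bits: low 24 mask bits clear -> 1 extra hex digit */
	unsigned long long m36 = ~((1ULL << (36 - PAGE_SHIFT)) - 1);
	/* 44 physical bits: low 32 mask bits clear -> 3 extra hex digits */
	unsigned long long m44 = ~((1ULL << (44 - PAGE_SHIFT)) - 1);

	printf("36-bit box: high_width = %d\n", high_hex_digits(m36)); /* 1 */
	printf("44-bit box: high_width = %d\n", high_hex_digits(m44)); /* 3 */
	return 0;
}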
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 18dfa30795c9..b79c5533c421 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -442,7 +442,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		_ASM_EXTABLE(1b, 4b)
 		_ASM_EXTABLE(2b, 4b)
 
-		: [old] "=r" (old), [faulted] "=r" (faulted)
+		: [old] "=&r" (old), [faulted] "=r" (faulted)
 		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
 		: "memory"
 	);
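
The added '&' is an early-clobber marker: this asm writes [old] before it has consumed all of its inputs, so without '&' the compiler may legally assign [old] the same register as [parent] or [return_hooker], and that input gets overwritten. A minimal demonstration of the constraint (the instruction sequence is illustrative, not the ftrace one):

#include <stdio.h>

int main(void)
{
	unsigned long in1 = 5, in2 = 7, out;

	/* The output is written before the last input is read, so the
	 * output register must not alias an input: hence "=&r". */
	asm("mov %1, %0\n\t"
	    "add %2, %0"
	    : "=&r" (out)
	    : "r" (in1), "r" (in2));

	printf("out = %lu\n", out);	/* out = 12 */
	return 0;
}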
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index eedfaebe1063..b1f4dffb919e 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -88,6 +88,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	gdb_regs[GDB_SS]	= __KERNEL_DS;
 	gdb_regs[GDB_FS]	= 0xFFFF;
 	gdb_regs[GDB_GS]	= 0xFFFF;
+	gdb_regs[GDB_SP]	= (int)&regs->sp;
 #else
 	gdb_regs[GDB_R8]	= regs->r8;
 	gdb_regs[GDB_R9]	= regs->r9;
@@ -100,8 +101,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	gdb_regs32[GDB_PS]	= regs->flags;
 	gdb_regs32[GDB_CS]	= regs->cs;
 	gdb_regs32[GDB_SS]	= regs->ss;
-#endif
 	gdb_regs[GDB_SP]	= regs->sp;
+#endif
 }
 
 /**
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 8e45f4464880..9faf43bea336 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -134,7 +134,9 @@ static void *get_call_destination(u8 type)
 		.pv_irq_ops = pv_irq_ops,
 		.pv_apic_ops = pv_apic_ops,
 		.pv_mmu_ops = pv_mmu_ops,
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 		.pv_lock_ops = pv_lock_ops,
+#endif
 	};
 	return *((void **)&tmpl + type);
 }
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 1340dad417f4..667188e0b5a0 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -232,6 +232,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
 		},
 	},
+	{	/* Handle problems with rebooting on Sony VGN-Z540N */
+		.callback = set_bios_reboot,
+		.ident = "Sony VGN-Z540N",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 3a97a4cf1872..8f0e13be36b3 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -160,8 +160,10 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 	/*
 	 * If large page isn't supported, there's no benefit in doing
 	 * this.  Also, on non-NUMA, embedding is better.
+	 *
+	 * NOTE: disabled for now.
 	 */
-	if (!cpu_has_pse || !pcpu_need_numa())
+	if (true || !cpu_has_pse || !pcpu_need_numa())
 		return -EINVAL;
 
 	/*
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index ed0c33761e6d..8c7b03b0cfcb 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -832,7 +832,7 @@ static int __init uv_bau_init(void)
 		return 0;
 
 	for_each_possible_cpu(cur_cpu)
-		alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
+		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
 				       GFP_KERNEL, cpu_to_node(cur_cpu));
 
 	uv_bau_retry_limit = 1;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b6caf1329b1b..32cf11e5728a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2897,8 +2897,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->tlb_flush(vcpu);
-	set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
+	kvm_set_cr3(vcpu, vcpu->arch.cr3);
 	return 1;
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1821c2078199..1f8510c51d6e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -411,7 +411,6 @@ static __init int svm_hardware_setup(void)
 
 	iopm_va = page_address(iopm_pages);
 	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
-	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
 	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
 
 	if (boot_cpu_has(X86_FEATURE_NX))
@@ -796,6 +795,11 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
 	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
 
+	/* AMD's VMCB does not have an explicit unusable field, so emulate it
+	 * for cross vendor migration purposes by "not present"
+	 */
+	var->unusable = !var->present || (var->type == 0);
+
 	switch (seg) {
 	case VCPU_SREG_CS:
 		/*
@@ -827,8 +831,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 		var->type |= 0x1;
 		break;
 	}
-
-	var->unusable = !var->present;
 }
 
 static int svm_get_cpl(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7c1ce5ac6131..3944e917e794 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -338,6 +338,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
 
 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
+	unsigned long old_cr4 = vcpu->arch.cr4;
+	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
+
 	if (cr4 & CR4_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
@@ -351,7 +354,8 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			kvm_inject_gp(vcpu, 0);
 			return;
 		}
-	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
+	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
+		   && ((cr4 ^ old_cr4) & pdptr_bits)
 		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		kvm_inject_gp(vcpu, 0);
@@ -1121,9 +1125,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
 static int is_efer_nx(void)
 {
-	u64 efer;
+	unsigned long long efer = 0;
 
-	rdmsrl(MSR_EFER, efer);
+	rdmsrl_safe(MSR_EFER, &efer);
 	return efer & EFER_NX;
 }
 
@@ -1259,7 +1263,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
 		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
 		bit(X86_FEATURE_SYSCALL) |
-		(bit(X86_FEATURE_NX) && is_efer_nx()) |
+		(is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
 #ifdef CONFIG_X86_64
 		bit(X86_FEATURE_LM) |
 #endif
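
The cpuid-feature change fixes a classic masking bug: '&&' collapses its result to 0 or 1, so the old expression advertised NX in bit 0 rather than at its real position (bit 20 of CPUID 0x80000001 EDX). A standalone illustration:

#include <stdio.h>

#define bit(n) (1U << (n))
#define FEATURE_NX 20		/* NX's bit position in that cpuid word */

int main(void)
{
	int efer_nx = 1;

	unsigned buggy = bit(FEATURE_NX) && efer_nx;	/* 0x1: wrong bit */
	unsigned fixed = efer_nx ? bit(FEATURE_NX) : 0;	/* 0x100000       */

	printf("buggy mask = %#x, fixed mask = %#x\n", buggy, fixed);
	return 0;
}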
diff --git a/arch/x86/lguest/Makefile b/arch/x86/lguest/Makefile
index 27f0c9ed7f60..94e0e54056a9 100644
--- a/arch/x86/lguest/Makefile
+++ b/arch/x86/lguest/Makefile
@@ -1 +1,2 @@
 obj-y		:= i386_head.o boot.o
+CFLAGS_boot.o	:= $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index ca7ec44bafc3..33a93b417396 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -67,6 +67,7 @@
 #include <asm/mce.h>
 #include <asm/io.h>
 #include <asm/i387.h>
+#include <asm/stackprotector.h>
 #include <asm/reboot.h>		/* for struct machine_ops */
 
 /*G:010 Welcome to the Guest!
@@ -1088,13 +1089,21 @@ __init void lguest_init(void)
  * lguest_init() where the rest of the fairly chaotic boot setup
  * occurs. */
 
+	/* The stack protector is a weird thing where gcc places a canary
+	 * value on the stack and then checks it on return.  This file is
+	 * compiled with -fno-stack-protector, so we got this far without
+	 * problems.  The value of the canary is kept at offset 20 from the
+	 * %gs register, so we need to set that up before calling C functions
+	 * in other files. */
+	setup_stack_canary_segment(0);
+	/* We could just call load_stack_canary_segment(), but we might as
+	 * well call switch_to_new_gdt() which loads the whole table and sets
+	 * up the per-cpu segment descriptor register %fs as well. */
+	switch_to_new_gdt(0);
+
 	/* As described in head_32.S, we map the first 128M of memory. */
 	max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT;
 
-	/* Load the %fs segment register (the per-cpu segment register) with
-	 * the normal data segment to get through booting. */
-	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");
-
 	/* The Host<->Guest Switcher lives at the top of our address space, and
 	 * the Host told us how big it is when we made LGUEST_INIT hypercall:
 	 * it put the answer in lguest_data.reserve_mem */
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 8f307d914c2e..f46c340727b8 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -26,12 +26,16 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
26 unsigned long sbase = saddr & PUD_MASK; 26 unsigned long sbase = saddr & PUD_MASK;
27 unsigned long s_end = sbase + PUD_SIZE; 27 unsigned long s_end = sbase + PUD_SIZE;
28 28
29 /* Allow segments to share if only one is marked locked */
30 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
31 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
32
29 /* 33 /*
30 * match the virtual addresses, permission and the alignment of the 34 * match the virtual addresses, permission and the alignment of the
31 * page table page. 35 * page table page.
32 */ 36 */
33 if (pmd_index(addr) != pmd_index(saddr) || 37 if (pmd_index(addr) != pmd_index(saddr) ||
34 vma->vm_flags != svma->vm_flags || 38 vm_flags != svm_flags ||
35 sbase < svma->vm_start || svma->vm_end < s_end) 39 sbase < svma->vm_start || svma->vm_end < s_end)
36 return 0; 40 return 0;
37 41
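
The masking above means two mappings that differ only in VM_LOCKED can still share a huge-page page table. A standalone sketch of the comparison (VM_LOCKED's value copied from the kernel headers; the other flag bits are arbitrary):

#include <stdio.h>

#define VM_LOCKED 0x00002000UL

/* Flags are shareable when they match once VM_LOCKED is ignored. */
static int flags_shareable(unsigned long a, unsigned long b)
{
	return (a & ~VM_LOCKED) == (b & ~VM_LOCKED);
}

int main(void)
{
	unsigned long vma_flags  = 0x00000073UL | VM_LOCKED; /* mlock()ed */
	unsigned long svma_flags = 0x00000073UL;             /* not locked */

	printf("strict compare: %d\n", vma_flags == svma_flags);        /* 0 */
	printf("masked compare: %d\n",
	       flags_shareable(vma_flags, svma_flags));                 /* 1 */
	return 0;
}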
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 797f9f107cb6..e17efed088c5 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -153,7 +153,7 @@ static void __cpa_flush_all(void *arg)
 	 */
 	__flush_tlb_all();
 
-	if (cache && boot_cpu_data.x86_model >= 4)
+	if (cache && boot_cpu_data.x86 >= 4)
 		wbinvd();
 }
 
@@ -208,20 +208,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
 			    int in_flags, struct page **pages)
 {
 	unsigned int i, level;
+	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
 
 	BUG_ON(irqs_disabled());
 
-	on_each_cpu(__cpa_flush_range, NULL, 1);
+	on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
 
-	if (!cache)
+	if (!cache || do_wbinvd)
 		return;
 
-	/* 4M threshold */
-	if (numpages >= 1024) {
-		if (boot_cpu_data.x86_model >= 4)
-			wbinvd();
-		return;
-	}
 	/*
 	 * We only need to flush on one CPU,
 	 * clflush is a MESI-coherent instruction that
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 04df67f8a7ba..044897be021f 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -76,9 +76,9 @@ void
 x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
 	struct frame_head *head = (struct frame_head *)frame_pointer(regs);
-	unsigned long stack = kernel_trap_sp(regs);
 
 	if (!user_mode_vm(regs)) {
+		unsigned long stack = kernel_stack_pointer(regs);
 		if (depth)
 			dump_trace(NULL, regs, (unsigned long *)stack, 0,
 				   &backtrace_ops, &depth);
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 5fa10bb9604f..8766b0e216c5 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -375,7 +375,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
 	if (!fixmem32)
 		return AE_OK;
 	if ((mcfg_res->start >= fixmem32->address) &&
-	    (mcfg_res->end <= (fixmem32->address +
+	    (mcfg_res->end < (fixmem32->address +
 			       fixmem32->address_length))) {
 		mcfg_res->flags = 1;
 		return AE_CTRL_TERMINATE;
@@ -392,7 +392,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
 		return AE_OK;
 
 	if ((mcfg_res->start >= address.minimum) &&
-	    (mcfg_res->end <= (address.minimum + address.address_length))) {
+	    (mcfg_res->end < (address.minimum + address.address_length))) {
 		mcfg_res->flags = 1;
 		return AE_CTRL_TERMINATE;
 	}
@@ -418,7 +418,7 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
 	struct resource mcfg_res;
 
 	mcfg_res.start = start;
-	mcfg_res.end = end;
+	mcfg_res.end = end - 1;
 	mcfg_res.flags = 0;
 
 	acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);
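
All three hunks enforce the struct-resource convention that 'end' is inclusive: containment in [base, base+len) becomes 'end < base + len', and a caller holding an exclusive end must store 'end - 1'. A standalone sketch of the convention, using a made-up 256 MB window:

#include <stdio.h>

struct res { unsigned long long start, end; };	/* end is inclusive */

static int contained(struct res r,
		     unsigned long long base, unsigned long long len)
{
	return r.start >= base && r.end < base + len;
}

int main(void)
{
	unsigned long long base = 0xe0000000ULL, len = 0x10000000ULL;
	struct res exact = { base, base + len - 1 };	/* exactly fills it */
	struct res over  = { base, base + len };	/* one byte too far */

	printf("exact fit: %d\n", contained(exact, base, len));	/* 1 */
	printf("overrun:   %d\n", contained(over, base, len));	/* 0 */
	return 0;
}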
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 3b767d03fd6a..172438f86a02 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -9,5 +9,6 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm.o xen-asm_$(BITS).o \
 			grant-table.o suspend.o
 
-obj-$(CONFIG_SMP)		+= smp.o spinlock.o
-obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
\ No newline at end of file
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
+obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e25a78e1113a..fba55b1a4021 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,6 +42,7 @@
 #include <linux/highmem.h>
 #include <linux/debugfs.h>
 #include <linux/bug.h>
+#include <linux/module.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 20139464943c..ca6596b05d53 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -62,15 +62,26 @@ void xen_setup_vcpu_info_placement(void);
 #ifdef CONFIG_SMP
 void xen_smp_init(void);
 
-void __init xen_init_spinlocks(void);
-__cpuinit void xen_init_lock_cpu(int cpu);
-void xen_uninit_lock_cpu(int cpu);
-
 extern cpumask_var_t xen_cpu_initialized_map;
 #else
 static inline void xen_smp_init(void) {}
 #endif
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init xen_init_spinlocks(void);
+__cpuinit void xen_init_lock_cpu(int cpu);
+void xen_uninit_lock_cpu(int cpu);
+#else
+static inline void xen_init_spinlocks(void)
+{
+}
+static inline void xen_init_lock_cpu(int cpu)
+{
+}
+static inline void xen_uninit_lock_cpu(int cpu)
+{
+}
+#endif
 
 /* Declare an asm function, along with symbols needed to make it
    inlineable */