Diffstat (limited to 'arch/x86')
 arch/x86/Kconfig                           |   6
 arch/x86/Kconfig.cpu                       |   2
 arch/x86/boot/compressed/Makefile          |   6
 arch/x86/boot/compressed/misc.c            |   4
 arch/x86/include/asm/acpi.h                |  26
 arch/x86/include/asm/kvm.h                 |   4
 arch/x86/include/asm/mce.h                 |   3
 arch/x86/include/asm/perf_event.h          |   1
 arch/x86/include/asm/uaccess_32.h          |   5
 arch/x86/include/asm/uaccess_64.h          |   5
 arch/x86/include/asm/uv/uv_hub.h           |  98
 arch/x86/kernel/acpi/Makefile              |   2
 arch/x86/kernel/acpi/boot.c                |  22
 arch/x86/kernel/acpi/processor.c           | 101
 arch/x86/kernel/acpi/sleep.c               |   2
 arch/x86/kernel/amd_iommu_init.c           |  10
 arch/x86/kernel/aperture_64.c              |   1
 arch/x86/kernel/apic/apic.c                |  23
 arch/x86/kernel/apic/apic_flat_64.c        |   5
 arch/x86/kernel/apic/io_apic.c             |   7
 arch/x86/kernel/apic/probe_64.c            |  13
 arch/x86/kernel/apic/x2apic_uv_x.c         |  38
 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |   4
 arch/x86/kernel/cpu/perf_event.c           |   9
 arch/x86/kernel/dumpstack.c                |  14
 arch/x86/kernel/e820.c                     |   4
 arch/x86/kernel/microcode_core.c           |   2
 arch/x86/kernel/process.c                  |   6
 arch/x86/kernel/process_32.c               |  14
 arch/x86/kernel/process_64.c               |  24
 arch/x86/kvm/lapic.c                       |   1
 arch/x86/kvm/paging_tmpl.h                 |  18
 arch/x86/kvm/x86.c                         |  12
 arch/x86/mm/init_32.c                      |   3
 arch/x86/mm/kmemcheck/error.c              |  19
 arch/x86/mm/kmmio.c                        |   7
 arch/x86/pci/bus_numa.c                    |   2
 arch/x86/pci/intel_bus.c                   |   4
 arch/x86/tools/chkobjdump.awk              |  16
 arch/x86/xen/enlighten.c                   |   4
 40 files changed, 271 insertions(+), 276 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 55298e891571..cbcbfdee3ee0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -49,6 +49,7 @@ config X86
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_LZO
 	select HAVE_HW_BREAKPOINT
 	select PERF_EVENTS
 	select ANON_INODES
@@ -1246,6 +1247,11 @@ config ARCH_MEMORY_PROBE
 	def_bool X86_64
 	depends on MEMORY_HOTPLUG
 
+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0 if X86_32
+	default 0xdead000000000000 if X86_64
+
 source "mm/Kconfig"
 
 config HIGHPTE
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 08e442bc3ab9..f20ddf84a893 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -396,7 +396,7 @@ config X86_TSC
 
 config X86_CMPXCHG64
 	def_bool y
-	depends on !M386 && !M486
+	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
 
 # this should be set for all -march=.. options where the compiler
 # generates cmov.
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index f8ed0658404c..fbb47daf2459 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,11 +4,12 @@
 # create a compressed vmlinux image from the original vmlinux
 #
 
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo head_$(BITS).o misc.o piggy.o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small
 KBUILD_CFLAGS += $(cflags-y)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
@@ -48,10 +49,13 @@ $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,bzip2)
 $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzma)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
+	$(call if_changed,lzo)
 
 suffix-$(CONFIG_KERNEL_GZIP) := gz
 suffix-$(CONFIG_KERNEL_BZIP2) := bz2
 suffix-$(CONFIG_KERNEL_LZMA) := lzma
+suffix-$(CONFIG_KERNEL_LZO) := lzo
 
 quiet_cmd_mkpiggy = MKPIGGY $@
       cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 842b2a36174a..3b22fe8ab91b 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -162,6 +162,10 @@ static int lines, cols;
 #include "../../../../lib/decompress_unlzma.c"
 #endif
 
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
 static void scroll(void)
 {
 	int i;
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 60d2b2db0bc5..56f462cf22d2 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -142,6 +142,32 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 	return max_cstate;
 }
 
+static inline bool arch_has_acpi_pdc(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+	return (c->x86_vendor == X86_VENDOR_INTEL ||
+		c->x86_vendor == X86_VENDOR_CENTAUR);
+}
+
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	buf[2] |= ACPI_PDC_C_CAPABILITY_SMP;
+
+	if (cpu_has(c, X86_FEATURE_EST))
+		buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
+
+	if (cpu_has(c, X86_FEATURE_ACPI))
+		buf[2] |= ACPI_PDC_T_FFH;
+
+	/*
+	 * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
+	 */
+	if (!cpu_has(c, X86_FEATURE_MWAIT))
+		buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
+}
+
 #else /* !CONFIG_ACPI */
 
 #define acpi_lapic 0
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 950df434763f..f46b79f6c16c 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
 	__u8 reserved[31];
 };
 
+/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
+#define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
+
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
 	struct {
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 858baa061cfc..6c3fdd631ed3 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -108,10 +108,11 @@ struct mce_log {
 #define K8_MCE_THRESHOLD_BANK_5    (MCE_THRESHOLD_BASE + 5 * 9)
 #define K8_MCE_THRESHOLD_DRAM_ECC  (MCE_THRESHOLD_BANK_4 + 0)
 
-extern struct atomic_notifier_head x86_mce_decoder_chain;
 
 #ifdef __KERNEL__
 
+extern struct atomic_notifier_head x86_mce_decoder_chain;
+
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <asm/atomic.h>
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8d9f8548a870..1380367dabd9 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -19,6 +19,7 @@
 #define MSR_ARCH_PERFMON_EVENTSEL1			     0x187
 
 #define ARCH_PERFMON_EVENTSEL0_ENABLE			  (1 << 22)
+#define ARCH_PERFMON_EVENTSEL_ANY			  (1 << 21)
 #define ARCH_PERFMON_EVENTSEL_INT			  (1 << 20)
 #define ARCH_PERFMON_EVENTSEL_OS			  (1 << 17)
 #define ARCH_PERFMON_EVENTSEL_USR			  (1 << 16)
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 0c9825e97f36..088d09fb1615 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -205,14 +205,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
 					  unsigned long n)
 {
 	int sz = __compiletime_object_size(to);
-	int ret = -EFAULT;
 
 	if (likely(sz == -1 || sz >= n))
-		ret = _copy_from_user(to, from, n);
+		n = _copy_from_user(to, from, n);
 	else
 		copy_from_user_overflow();
 
-	return ret;
+	return n;
 }
 
 long __must_check strncpy_from_user(char *dst, const char __user *src,
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 46324c6a4f6e..535e421498f6 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -30,16 +30,15 @@ static inline unsigned long __must_check copy_from_user(void *to,
 					  unsigned long n)
 {
 	int sz = __compiletime_object_size(to);
-	int ret = -EFAULT;
 
 	might_fault();
 	if (likely(sz == -1 || sz >= n))
-		ret = _copy_from_user(to, from, n);
+		n = _copy_from_user(to, from, n);
 #ifdef CONFIG_DEBUG_VM
 	else
 		WARN(1, "Buffer overflow detected!\n");
 #endif
-	return ret;
+	return n;
 }
 
 static __always_inline __must_check
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 811bfabc80b7..40be813fefb1 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -31,20 +31,20 @@
  * contiguous (although various IO spaces may punch holes in
  * it)..
  *
  * N - Number of bits in the node portion of a socket physical
  *     address.
  *
  * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
  *	   routers always have low bit of 1, C/MBricks have low bit
  *	   equal to 0. Most addressing macros that target UV hub chips
  *	   right shift the NASID by 1 to exclude the always-zero bit.
  *	   NASIDs contain up to 15 bits.
  *
  * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
  *	   of nasids.
  *
  * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
  *	   of the nasid for socket usage.
  *
  *
  * NumaLink Global Physical Address Format:
@@ -71,12 +71,12 @@
  *
  *
  * APICID format
  *	NOTE!!!!!! This is the current format of the APICID. However, code
  *	should assume that this will change in the future. Use functions
  *	in this file for all APICID bit manipulations and conversion.
  *
  *	1111110000000000
  *	5432109876543210
  *	pppppppppplc0cch
  *	sssssssssss
  *
@@ -89,9 +89,9 @@
  * Note: Processor only supports 12 bits in the APICID register. The ACPI
  *       tables hold all 16 bits. Software needs to be aware of this.
  *
  *	Unless otherwise specified, all references to APICID refer to
  *	the FULL value contained in ACPI tables, not the subset in the
  *	processor APICID register.
  */
 
 
@@ -151,16 +151,16 @@ struct uv_hub_info_s {
 };
 
 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define uv_hub_info		(&__get_cpu_var(__uv_hub_info))
 #define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
 
 /*
  * Local & Global MMR space macros.
  *	Note: macros are intended to be used ONLY by inline functions
  *	in this file - not by other kernel code.
  *		n -  NASID (full 15-bit global nasid)
  *		g -  GNODE (full 15-bit global nasid, right shifted 1)
  *		p -  PNODE (local part of nsids, right shifted 1)
  */
 #define UV_NASID_TO_PNODE(n)		(((n) >> 1) & uv_hub_info->pnode_mask)
 #define UV_PNODE_TO_GNODE(p)		((p) |uv_hub_info->gnode_extra)
@@ -215,8 +215,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 /*
  * Macros for converting between kernel virtual addresses, socket local physical
  * addresses, and UV global physical addresses.
  *	Note: use the standard __pa() & __va() macros for converting
  *	      between socket virtual and socket physical addresses.
  */
 
 /* socket phys RAM --> UV global physical address */
@@ -287,21 +287,18 @@ static inline int uv_apicid_to_pnode(int apicid)
  * Access global MMRs using the low memory MMR32 space. This region supports
  * faster MMR access but not all MMRs are accessible in this space.
  */
-static inline unsigned long *uv_global_mmr32_address(int pnode,
-						unsigned long offset)
+static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
 {
 	return __va(UV_GLOBAL_MMR32_BASE |
 		UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
 }
 
-static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
-					 unsigned long val)
+static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
 {
 	writeq(val, uv_global_mmr32_address(pnode, offset));
 }
 
-static inline unsigned long uv_read_global_mmr32(int pnode,
-						 unsigned long offset)
+static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
 {
 	return readq(uv_global_mmr32_address(pnode, offset));
 }
@@ -310,21 +307,18 @@ static inline unsigned long uv_read_global_mmr32(int pnode,
  * Access Global MMR space using the MMR space located at the top of physical
  * memory.
  */
-static inline unsigned long *uv_global_mmr64_address(int pnode,
-						unsigned long offset)
+static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset)
 {
 	return __va(UV_GLOBAL_MMR64_BASE |
 		UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
 }
 
-static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
-					 unsigned long val)
+static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
 {
 	writeq(val, uv_global_mmr64_address(pnode, offset));
 }
 
-static inline unsigned long uv_read_global_mmr64(int pnode,
-						 unsigned long offset)
+static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
 {
 	return readq(uv_global_mmr64_address(pnode, offset));
 }
@@ -338,6 +332,16 @@ static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long o
 	return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
 }
 
+static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
+{
+	writeb(val, uv_global_mmr64_address(pnode, offset));
+}
+
+static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
+{
+	return readb(uv_global_mmr64_address(pnode, offset));
+}
+
 /*
  * Access hub local MMRs. Faster than using global space but only local MMRs
  * are accessible.
@@ -457,11 +461,17 @@ static inline void uv_set_scir_bits(unsigned char value)
 	}
 }
 
+static inline unsigned long uv_scir_offset(int apicid)
+{
+	return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
+}
+
 static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
 {
 	if (uv_cpu_hub_info(cpu)->scir.state != value) {
+		uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
+				uv_cpu_hub_info(cpu)->scir.offset, value);
 		uv_cpu_hub_info(cpu)->scir.state = value;
-		uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
 	}
 }
 
@@ -485,5 +495,17 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
+/*
+ * Get the minimum revision number of the hub chips within the partition.
+ *	1 - initial rev 1.0 silicon
+ *	2 - rev 2.0 production silicon
+ */
+static inline int uv_get_min_hub_revision_id(void)
+{
+	extern int uv_min_hub_revision_id;
+
+	return uv_min_hub_revision_id;
+}
+
 #endif /* CONFIG_X86_64 */
 #endif /* _ASM_X86_UV_UV_HUB_H */
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index fd5ca97a2ad5..6f35260bb3ef 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_ACPI)		+= boot.o
 obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_rm.o wakeup_$(BITS).o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y				+= cstate.o processor.o
+obj-y				+= cstate.o
 endif
 
 $(obj)/wakeup_rm.o:    $(obj)/realmode/wakeup.bin
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index fb1035cd9a6a..036d28adf59d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1529,16 +1529,10 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
  *	if acpi_blacklisted() acpi_disabled = 1;
  *	acpi_irq_model=...
  *	...
- *
- * return value: (currently ignored)
- *	0: success
- *	!0: failure
  */
 
-int __init acpi_boot_table_init(void)
+void __init acpi_boot_table_init(void)
 {
-	int error;
-
 	dmi_check_system(acpi_dmi_table);
 
 	/*
@@ -1546,15 +1540,14 @@ int __init acpi_boot_table_init(void)
 	 * One exception: acpi=ht continues far enough to enumerate LAPICs
 	 */
 	if (acpi_disabled && !acpi_ht)
-		return 1;
+		return;
 
 	/*
 	 * Initialize the ACPI boot-time table parser.
 	 */
-	error = acpi_table_init();
-	if (error) {
+	if (acpi_table_init()) {
 		disable_acpi();
-		return error;
+		return;
 	}
 
 	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
@@ -1562,18 +1555,15 @@ int __init acpi_boot_table_init(void)
 	/*
 	 * blacklist may disable ACPI entirely
 	 */
-	error = acpi_blacklisted();
-	if (error) {
+	if (acpi_blacklisted()) {
 		if (acpi_force) {
 			printk(KERN_WARNING PREFIX "acpi=force override\n");
 		} else {
 			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
 			disable_acpi();
-			return error;
+			return;
 		}
 	}
-
-	return 0;
 }
 
 int __init early_acpi_boot_init(void)
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
deleted file mode 100644
index d85d1b2432ba..000000000000
--- a/arch/x86/kernel/acpi/processor.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2005 Intel Corporation
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- *	- Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
-{
-	struct acpi_object_list *obj_list;
-	union acpi_object *obj;
-	u32 *buf;
-
-	/* allocate and initialize pdc. It will be used later. */
-	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
-	if (!obj_list) {
-		printk(KERN_ERR "Memory allocation error\n");
-		return;
-	}
-
-	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
-	if (!obj) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj_list);
-		return;
-	}
-
-	buf = kmalloc(12, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj);
-		kfree(obj_list);
-		return;
-	}
-
-	buf[0] = ACPI_PDC_REVISION_ID;
-	buf[1] = 1;
-	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
-
-	/*
-	 * The default of PDC_SMP_T_SWCOORD bit is set for intel x86 cpu so
-	 * that OSPM is capable of native ACPI throttling software
-	 * coordination using BIOS supplied _TSD info.
-	 */
-	buf[2] |= ACPI_PDC_SMP_T_SWCOORD;
-	if (cpu_has(c, X86_FEATURE_EST))
-		buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
-
-	if (cpu_has(c, X86_FEATURE_ACPI))
-		buf[2] |= ACPI_PDC_T_FFH;
-
-	/*
-	 * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
-	 */
-	if (!cpu_has(c, X86_FEATURE_MWAIT))
-		buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
-
-	obj->type = ACPI_TYPE_BUFFER;
-	obj->buffer.length = 12;
-	obj->buffer.pointer = (u8 *) buf;
-	obj_list->count = 1;
-	obj_list->pointer = obj;
-	pr->pdc = obj_list;
-
-	return;
-}
-
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
-	struct cpuinfo_x86 *c = &cpu_data(pr->id);
-
-	pr->pdc = NULL;
-	if (c->x86_vendor == X86_VENDOR_INTEL ||
-	    c->x86_vendor == X86_VENDOR_CENTAUR)
-		init_intel_pdc(pr, c);
-
-	return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
-
-void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr)
-{
-	if (pr->pdc) {
-		kfree(pr->pdc->pointer->buffer.pointer);
-		kfree(pr->pdc->pointer);
-		kfree(pr->pdc);
-		pr->pdc = NULL;
-	}
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 82e508677b91..f9961034e557 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -162,6 +162,8 @@ static int __init acpi_sleep_setup(char *str)
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
+		if (strncmp(str, "sci_force_enable", 16) == 0)
+			acpi_set_sci_en_on_resume();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 1dca9c34eaeb..fb490ce7dd55 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -138,6 +138,11 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 
 /*
+ * Set to true if ACPI table parsing and hardware intialization went properly
+ */
+static bool amd_iommu_initialized;
+
+/*
  * List of protection domains - used during resume
  */
 LIST_HEAD(amd_iommu_pd_list);
@@ -929,6 +934,8 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	}
 	WARN_ON(p != end);
 
+	amd_iommu_initialized = true;
+
 	return 0;
 }
 
@@ -1263,6 +1270,9 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
 		goto free;
 
+	if (!amd_iommu_initialized)
+		goto free;
+
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997e8b25..f147a95fd84a 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -31,6 +31,7 @@
 #include <asm/x86_init.h>
 
 int gart_iommu_aperture;
+EXPORT_SYMBOL_GPL(gart_iommu_aperture);
 int gart_iommu_aperture_disabled __initdata;
 int gart_iommu_aperture_allowed __initdata;
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index aa57c079c98f..3987e4408f75 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -61,12 +61,6 @@ unsigned int boot_cpu_physical_apicid = -1U;
 
 /*
  * The highest APIC ID seen during enumeration.
- *
- * On AMD, this determines the messaging protocol we can use: if all APIC IDs
- * are in the 0 ... 7 range, then we can use logical addressing which
- * has some performance advantages (better broadcasting).
- *
- * If there's an APIC ID above 8, we use physical addressing.
  */
 unsigned int max_physical_apicid;
 
@@ -1898,14 +1892,17 @@ void __cpuinit generic_processor_info(int apicid, int version)
 		max_physical_apicid = apicid;
 
 #ifdef CONFIG_X86_32
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_INTEL:
-		if (num_processors > 8)
-			def_to_bigsmp = 1;
-		break;
-	case X86_VENDOR_AMD:
-		if (max_physical_apicid >= 8)
+	if (num_processors > 8) {
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			if (!APIC_XAPIC(version)) {
+				def_to_bigsmp = 0;
+				break;
+			}
+			/* If P4 and above fall through */
+		case X86_VENDOR_AMD:
 			def_to_bigsmp = 1;
+		}
 	}
 #endif
 
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index eacbd2b31d27..e3c3d820c325 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -240,6 +240,11 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 		printk(KERN_DEBUG "system APIC only can use physical flat");
 		return 1;
 	}
+
+	if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
+		printk(KERN_DEBUG "IBM Summit detected, will use apic physical");
+		return 1;
+	}
 #endif
 
 	return 0;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index de00c4619a55..53243ca7816d 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2434,6 +2434,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		cfg = irq_cfg(irq);
 		raw_spin_lock(&desc->lock);
 
+		/*
+		 * Check if the irq migration is in progress. If so, we
+		 * haven't received the cleanup request yet for this irq.
+		 */
+		if (cfg->move_in_progress)
+			goto unlock;
+
 		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
 
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index c4cbd3080c1c..450fe2064a14 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -67,17 +67,8 @@ void __init default_setup_apic_routing(void)
 	}
 #endif
 
-	if (apic == &apic_flat) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
-			if (num_processors > 8)
-				apic = &apic_physflat;
-			break;
-		case X86_VENDOR_AMD:
-			if (max_physical_apicid >= 8)
-				apic = &apic_physflat;
-		}
-	}
+	if (apic == &apic_flat && num_processors > 8)
+		apic = &apic_physflat;
 
 	printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
 
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d56b0efb2057..21db3cbea7dc 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -36,6 +36,8 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 static u64 gru_start_paddr, gru_end_paddr;
+int uv_min_hub_revision_id;
+EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
 
 static inline bool is_GRU_range(u64 start, u64 end)
 {
@@ -55,12 +57,19 @@ static int early_get_nodeid(void)
 	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
 	node_id.v = *mmr;
 	early_iounmap(mmr, sizeof(*mmr));
+
+	/* Currently, all blades have same revision number */
+	uv_min_hub_revision_id = node_id.s.revision;
+
 	return node_id.s.node_id;
 }
 
 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
+	int nodeid;
+
 	if (!strcmp(oem_id, "SGI")) {
+		nodeid = early_get_nodeid();
 		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
 		if (!strcmp(oem_table_id, "UVL"))
 			uv_system_type = UV_LEGACY_APIC;
@@ -68,7 +77,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 			uv_system_type = UV_X2APIC;
 		else if (!strcmp(oem_table_id, "UVH")) {
 			__get_cpu_var(x2apic_extra_bits) =
-				early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
+				nodeid << (UV_APIC_PNODE_SHIFT - 1);
 			uv_system_type = UV_NON_UNIQUE_APIC;
 			return 1;
 		}
@@ -374,13 +383,13 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 
 enum map_type {map_wb, map_uc};
 
-static __init void map_high(char *id, unsigned long base, int shift,
-			    int max_pnode, enum map_type map_type)
+static __init void map_high(char *id, unsigned long base, int pshift,
+			    int bshift, int max_pnode, enum map_type map_type)
 {
 	unsigned long bytes, paddr;
 
-	paddr = base << shift;
-	bytes = (1UL << shift) * (max_pnode + 1);
+	paddr = base << pshift;
+	bytes = (1UL << bshift) * (max_pnode + 1);
 	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
 							paddr + bytes);
 	if (map_type == map_uc)
@@ -396,7 +405,7 @@ static __init void map_gru_high(int max_pnode)
 
 	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
 	if (gru.s.enable) {
-		map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
+		map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
 		gru_start_paddr = ((u64)gru.s.base << shift);
 		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
 
@@ -410,7 +419,7 @@ static __init void map_mmr_high(int max_pnode)
 
 	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
 	if (mmr.s.enable)
-		map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
+		map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
 }
 
 static __init void map_mmioh_high(int max_pnode)
@@ -420,7 +429,8 @@ static __init void map_mmioh_high(int max_pnode)
 
 	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
 	if (mmioh.s.enable)
-		map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
+		map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
+			max_pnode, map_uc);
 }
 
 static __init void map_low_mmrs(void)
@@ -629,8 +639,10 @@ void __init uv_system_init(void)
 	uv_rtc_init();
 
 	for_each_present_cpu(cpu) {
+		int apicid = per_cpu(x86_cpu_to_apicid, cpu);
+
 		nid = cpu_to_node(cpu);
-		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
+		pnode = uv_apicid_to_pnode(apicid);
 		blade = boot_pnode_to_blade(pnode);
 		lcpu = uv_blade_info[blade].nr_possible_cpus;
 		uv_blade_info[blade].nr_possible_cpus++;
@@ -651,15 +663,13 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
 		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
-		uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
+		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
 		max_pnode = max(pnode, max_pnode);
 
-		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
-			"lcpu %d, blade %d\n",
-			cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
-			lcpu, blade);
+		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
+			cpu, apicid, pnode, nid, lcpu, blade);
 	}
 
 	/* Add blade/pnode info for nodes without cpus */
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index f28decf8dde3..1b1920fa7c80 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -190,9 +190,11 @@ static void do_drv_write(void *_cmd)
 
 static void drv_read(struct drv_cmd *cmd)
 {
+	int err;
 	cmd->val = 0;
 
-	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
+	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
+	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
 }
 
 static void drv_write(struct drv_cmd *cmd)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c223b7e895d9..8c1c07073ccc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1343,6 +1343,13 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
 		bits |= 0x2;
 	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
 		bits |= 0x1;
+
+	/*
+	 * ANY bit is supported in v3 and up
+	 */
+	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
+		bits |= 0x4;
+
 	bits <<= (idx * 4);
 	mask = 0xfULL << (idx * 4);
 
@@ -2347,7 +2354,7 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	callchain_store(entry, regs->ip);
 
-	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
+	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
 
 /*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index c56bc2873030..6d817554780a 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -123,13 +123,15 @@ print_context_stack_bp(struct thread_info *tinfo,
 	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
 		unsigned long addr = *ret_addr;
 
-		if (__kernel_text_address(addr)) {
-			ops->address(data, addr, 1);
-			frame = frame->next_frame;
-			ret_addr = &frame->return_address;
-			print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
-		}
+		if (!__kernel_text_address(addr))
+			break;
+
+		ops->address(data, addr, 1);
+		frame = frame->next_frame;
+		ret_addr = &frame->return_address;
+		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
 	}
+
 	return (unsigned long)frame;
 }
 EXPORT_SYMBOL_GPL(print_context_stack_bp);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 05ed7ab2ca48..a1a7876cadcb 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -733,13 +733,13 @@ struct early_res {
 };
 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
 	{ 0, PAGE_SIZE, "BIOS data page", 1 },	/* BIOS data page */
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
 	/*
 	 * But first pinch a few for the stack/trampoline stuff
 	 * FIXME: Don't need the extra page at 4K, but need to fix
 	 * trampoline before removing it. (see the GDT stuff)
 	 */
-	{ PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 },
+	{ PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
 #endif
 
 	{}
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 844c02c65fcb..0c8632433090 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -394,7 +394,7 @@ static enum ucode_state microcode_update_cpu(int cpu)
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	enum ucode_state ustate;
 
-	if (uci->valid && uci->mc)
+	if (uci->valid)
 		ustate = microcode_resume_cpu(cpu);
 	else
 		ustate = microcode_init_cpu(cpu);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 98c2cdeb599e..02c3ee013ccd 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -103,8 +103,8 @@ void show_regs_common(void)
 	if (!product)
 		product = "";
 
-	printk("\n");
-	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
+	printk(KERN_CONT "\n");
+	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
@@ -288,6 +288,8 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 	regs.es = __USER_DS;
 	regs.fs = __KERNEL_PERCPU;
 	regs.gs = __KERNEL_STACK_CANARY;
+#else
+	regs.ss = __KERNEL_DS;
 #endif
 
 	regs.orig_ax = -1;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 9c517b5858f0..37ad1e046aae 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -139,16 +139,16 @@ void __show_regs(struct pt_regs *regs, int all)
 
 	show_regs_common();
 
-	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
+	printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
 			(u16)regs->cs, regs->ip, regs->flags,
 			smp_processor_id());
 	print_symbol("EIP is at %s\n", regs->ip);
 
-	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
 		regs->ax, regs->bx, regs->cx, regs->dx);
-	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
+	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
 		regs->si, regs->di, regs->bp, sp);
-	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
+	printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
 		(u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);
 
 	if (!all)
@@ -158,19 +158,19 @@ void __show_regs(struct pt_regs *regs, int all)
 	cr2 = read_cr2();
 	cr3 = read_cr3();
 	cr4 = read_cr4_safe();
-	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
+	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
 			cr0, cr2, cr3, cr4);
 
 	get_debugreg(d0, 0);
 	get_debugreg(d1, 1);
 	get_debugreg(d2, 2);
 	get_debugreg(d3, 3);
-	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
+	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
 			d0, d1, d2, d3);
 
 	get_debugreg(d6, 6);
 	get_debugreg(d7, 7);
-	printk("DR6: %08lx DR7: %08lx\n",
+	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
 			d6, d7);
 }
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 52fbd0c60198..f9e033150cdf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -161,19 +161,19 @@ void __show_regs(struct pt_regs *regs, int all)
 	unsigned int ds, cs, es;
 
 	show_regs_common();
-	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
+	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
 	printk_address(regs->ip, 1);
-	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
+	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
 		regs->sp, regs->flags);
-	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
+	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
 		regs->ax, regs->bx, regs->cx);
-	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
+	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
 		regs->dx, regs->si, regs->di);
-	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
+	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
 		regs->bp, regs->r8, regs->r9);
-	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
+	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
 		regs->r10, regs->r11, regs->r12);
-	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
+	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
 		regs->r13, regs->r14, regs->r15);
 
 	asm("movl %%ds,%0" : "=r" (ds));
@@ -194,21 +194,21 @@ void __show_regs(struct pt_regs *regs, int all)
 	cr3 = read_cr3();
 	cr4 = read_cr4();
 
-	printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
 	       fs, fsindex, gs, gsindex, shadowgs);
-	printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
+	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
 			es, cr0);
-	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
+	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
 			cr4);
 
 	get_debugreg(d0, 0);
 	get_debugreg(d1, 1);
 	get_debugreg(d2, 2);
-	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
+	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
 	get_debugreg(d3, 3);
 	get_debugreg(d6, 6);
 	get_debugreg(d7, 7);
-	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
+	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
 }
 
 void show_regs(struct pt_regs *regs)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cd60c0bd1b32..3063a0c4858b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1150,6 +1150,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 	hrtimer_cancel(&apic->lapic_timer.timer);
 	update_divide_count(apic);
 	start_apic_timer(apic);
+	apic->irr_pending = true;
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a6017132fba8..58a0f1e88596 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (level == PT_PAGE_TABLE_LEVEL ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d068966fb2a..6651dbf58675 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
-	events->flags = 0;
+	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
+			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
 
 	vcpu_put(vcpu);
 }
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
-	if (events->flags)
+	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
+			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
 		return -EINVAL;
 
 	vcpu_load(vcpu);
@@ -1938,10 +1940,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		kvm_pic_clear_isr_ack(vcpu->kvm);
 
 	vcpu->arch.nmi_injected = events->nmi.injected;
-	vcpu->arch.nmi_pending = events->nmi.pending;
+	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
+		vcpu->arch.nmi_pending = events->nmi.pending;
 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-	vcpu->arch.sipi_vector = events->sipi_vector;
+	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
+		vcpu->arch.sipi_vector = events->sipi_vector;
 
 	vcpu_put(vcpu);
 
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c973f8e2a6cf..9a0c258a86be 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -892,8 +892,7 @@ void __init mem_init(void)
 		reservedpages << (PAGE_SHIFT-10),
 		datasize >> 10,
 		initsize >> 10,
-		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
-	       );
+		totalhigh_pages << (PAGE_SHIFT-10));
 
 	printk(KERN_INFO "virtual kernel memory layout:\n"
 		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
index 4901d0dafda6..af3b6c8a436f 100644
--- a/arch/x86/mm/kmemcheck/error.c
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -106,26 +106,25 @@ void kmemcheck_error_recall(void)
 
 	switch (e->type) {
 	case KMEMCHECK_ERROR_INVALID_ACCESS:
-		printk(KERN_ERR  "WARNING: kmemcheck: Caught %d-bit read "
-			"from %s memory (%p)\n",
+		printk(KERN_WARNING "WARNING: kmemcheck: Caught %d-bit read from %s memory (%p)\n",
 			8 * e->size, e->state < ARRAY_SIZE(desc) ?
 				desc[e->state] : "(invalid shadow state)",
 			(void *) e->address);
 
-		printk(KERN_INFO);
+		printk(KERN_WARNING);
 		for (i = 0; i < SHADOW_COPY_SIZE; ++i)
-			printk("%02x", e->memory_copy[i]);
-		printk("\n");
+			printk(KERN_CONT "%02x", e->memory_copy[i]);
+		printk(KERN_CONT "\n");
 
-		printk(KERN_INFO);
+		printk(KERN_WARNING);
 		for (i = 0; i < SHADOW_COPY_SIZE; ++i) {
 			if (e->shadow_copy[i] < ARRAY_SIZE(short_desc))
-				printk(" %c", short_desc[e->shadow_copy[i]]);
+				printk(KERN_CONT " %c", short_desc[e->shadow_copy[i]]);
 			else
-				printk(" ?");
+				printk(KERN_CONT " ?");
 		}
-		printk("\n");
-		printk(KERN_INFO "%*c\n", 2 + 2
+		printk(KERN_CONT "\n");
+		printk(KERN_WARNING "%*c\n", 2 + 2
 			* (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
 		break;
 	case KMEMCHECK_ERROR_BUG:
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index c0f6198565eb..536fb6823366 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -538,14 +538,15 @@ static int
 kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
 {
 	struct die_args *arg = args;
+	unsigned long* dr6_p = (unsigned long *)ERR_PTR(arg->err);
 
-	if (val == DIE_DEBUG && (arg->err & DR_STEP))
-		if (post_kmmio_handler(arg->err, arg->regs) == 1) {
+	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
+		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
 			/*
 			 * Reset the BS bit in dr6 (pointed by args->err) to
 			 * denote completion of processing
 			 */
-			(*(unsigned long *)ERR_PTR(arg->err)) &= ~DR_STEP;
+			*dr6_p &= ~DR_STEP;
 			return NOTIFY_STOP;
 		}
 
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 145df00e0387..f939d603adfa 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -51,7 +51,7 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
 	}
 }
 
-void __init update_res(struct pci_root_info *info, size_t start,
+void __devinit update_res(struct pci_root_info *info, size_t start,
 			size_t end, unsigned long flags, int merge)
 {
 	int i;
diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c
index b7a55dc55d13..f81a2fa8fe25 100644
--- a/arch/x86/pci/intel_bus.c
+++ b/arch/x86/pci/intel_bus.c
@@ -49,6 +49,10 @@ static void __devinit pci_root_bus_res(struct pci_dev *dev)
 	u64 mmioh_base, mmioh_end;
 	int bus_base, bus_end;
 
+	/* some sys doesn't get mmconf enabled */
+	if (dev->cfg_size < 0x120)
+		return;
+
 	if (pci_root_num >= PCI_ROOT_NR) {
 		printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n");
 		return;
diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk
index 5bbb5a33f220..fd1ab80be0de 100644
--- a/arch/x86/tools/chkobjdump.awk
+++ b/arch/x86/tools/chkobjdump.awk
@@ -8,14 +8,24 @@ BEGIN {
 	od_sver = 19;
 }
 
-/^GNU/ {
-	split($3, ver, ".");
+/^GNU objdump/ {
+	verstr = ""
+	for (i = 3; i <= NF; i++)
+		if (match($(i), "^[0-9]")) {
+			verstr = $(i);
+			break;
+		}
+	if (verstr == "") {
+		printf("Warning: Failed to find objdump version number.\n");
+		exit 0;
+	}
+	split(verstr, ver, ".");
 	if (ver[1] > od_ver ||
 	    (ver[1] == od_ver && ver[2] >= od_sver)) {
 		exit 1;
 	} else {
 		printf("Warning: objdump version %s is older than %d.%d\n",
-			$4, od_ver, od_sver);
+			verstr, od_ver, od_sver);
 		print("Warning: Skipping posttest.");
 		# Logic is inverted, because we just skip test without error.
 		exit 0;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2b26dd5930c6..36daccb68642 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1151,9 +1151,13 @@ asmlinkage void __init xen_start_kernel(void)
 
 	/* keep using Xen gdt for now; no urgent need to change it */
 
+#ifdef CONFIG_X86_32
 	pv_info.kernel_rpl = 1;
 	if (xen_feature(XENFEAT_supervisor_mode_kernel))
 		pv_info.kernel_rpl = 0;
+#else
+	pv_info.kernel_rpl = 0;
+#endif
 
 	/* set the limit of our address space */
 	xen_reserve_top();