Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 7
-rw-r--r--  arch/x86/Kconfig.cpu | 1
-rw-r--r--  arch/x86/boot/tty.c | 2
-rw-r--r--  arch/x86/include/asm/acpi.h | 1
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h | 24
-rw-r--r--  arch/x86/include/asm/dma-mapping.h | 6
-rw-r--r--  arch/x86/include/asm/ds.h | 6
-rw-r--r--  arch/x86/include/asm/iomap.h | 30
-rw-r--r--  arch/x86/include/asm/iommu.h | 1
-rw-r--r--  arch/x86/include/asm/mmzone_32.h | 4
-rw-r--r--  arch/x86/include/asm/pci_64.h | 14
-rw-r--r--  arch/x86/include/asm/ptrace.h | 2
-rw-r--r--  arch/x86/include/asm/topology.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 2
-rw-r--r--  arch/x86/include/asm/unistd_64.h | 4
-rw-r--r--  arch/x86/include/asm/vmi.h | 8
-rw-r--r--  arch/x86/kernel/Makefile | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 1
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 56
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 13
-rw-r--r--  arch/x86/kernel/apic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 18
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 17
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c | 3
-rw-r--r--  arch/x86/kernel/ds.c | 81
-rw-r--r--  arch/x86/kernel/early-quirks.c | 18
-rw-r--r--  arch/x86/kernel/es7000_32.c | 9
-rw-r--r--  arch/x86/kernel/hpet.c | 11
-rw-r--r--  arch/x86/kernel/i387.c | 2
-rw-r--r--  arch/x86/kernel/io_apic.c | 62
-rw-r--r--  arch/x86/kernel/kvmclock.c | 2
-rw-r--r--  arch/x86/kernel/microcode_core.c | 19
-rw-r--r--  arch/x86/kernel/microcode_intel.c | 6
-rw-r--r--  arch/x86/kernel/mpparse.c | 3
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c | 3
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 2
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 6
-rw-r--r--  arch/x86/kernel/quirks.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 9
-rw-r--r--  arch/x86/kernel/setup.c | 14
-rw-r--r--  arch/x86/kernel/smpboot.c | 2
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 4
-rw-r--r--  arch/x86/kernel/vmi_32.c | 16
-rw-r--r--  arch/x86/kernel/xsave.c | 2
-rw-r--r--  arch/x86/kvm/Kconfig | 2
-rw-r--r--  arch/x86/kvm/i8254.c | 4
-rw-r--r--  arch/x86/kvm/mmu.c | 4
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 1
-rw-r--r--  arch/x86/kvm/vmx.c | 7
-rw-r--r--  arch/x86/kvm/vmx.h | 1
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 16
-rw-r--r--  arch/x86/mm/numa_32.c | 35
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 5
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 6
-rw-r--r--  arch/x86/pci/fixup.c | 25
-rw-r--r--  arch/x86/power/hibernate_32.c | 4
-rw-r--r--  arch/x86/xen/mmu.c | 21
-rw-r--r--  arch/x86/xen/smp.c | 2
-rw-r--r--  arch/x86/xen/xen-ops.h | 2
59 files changed, 404 insertions(+), 230 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4cf0ab13d187..19f0d97829ee 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -167,9 +167,12 @@ config GENERIC_PENDING_IRQ
167config X86_SMP 167config X86_SMP
168 bool 168 bool
169 depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) 169 depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
170 select USE_GENERIC_SMP_HELPERS
171 default y 170 default y
172 171
172config USE_GENERIC_SMP_HELPERS
173 def_bool y
174 depends on SMP
175
173config X86_32_SMP 176config X86_32_SMP
174 def_bool y 177 def_bool y
175 depends on X86_32 && SMP 178 depends on X86_32 && SMP
@@ -479,7 +482,7 @@ config HPET_TIMER
479 The HPET provides a stable time base on SMP 482 The HPET provides a stable time base on SMP
480 systems, unlike the TSC, but it is more expensive to access, 483 systems, unlike the TSC, but it is more expensive to access,
481 as it is off-chip. You can find the HPET spec at 484 as it is off-chip. You can find the HPET spec at
482 <http://www.intel.com/hardwaredesign/hpetspec.htm>. 485 <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
483 486
484 You can safely choose Y here. However, HPET will only be 487 You can safely choose Y here. However, HPET will only be
485 activated if the platform and the BIOS support this feature. 488 activated if the platform and the BIOS support this feature.
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index b815664fe370..8e99073b9e0f 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -520,6 +520,7 @@ config X86_PTRACE_BTS
520 bool "Branch Trace Store" 520 bool "Branch Trace Store"
521 default y 521 default y
522 depends on X86_DEBUGCTLMSR 522 depends on X86_DEBUGCTLMSR
523 depends on BROKEN
523 help 524 help
524 This adds a ptrace interface to the hardware's branch trace store. 525 This adds a ptrace interface to the hardware's branch trace store.
525 526
diff --git a/arch/x86/boot/tty.c b/arch/x86/boot/tty.c
index 0be77b39328a..7e8e8b25f5f6 100644
--- a/arch/x86/boot/tty.c
+++ b/arch/x86/boot/tty.c
@@ -74,7 +74,7 @@ static int kbd_pending(void)
74{ 74{
75 u8 pending; 75 u8 pending;
76 asm volatile("int $0x16; setnz %0" 76 asm volatile("int $0x16; setnz %0"
77 : "=rm" (pending) 77 : "=qm" (pending)
78 : "a" (0x0100)); 78 : "a" (0x0100));
79 return pending; 79 return pending;
80} 80}
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 8d676d8ecde9..9830681446ad 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -113,7 +113,6 @@ static inline void acpi_disable_pci(void)
113 acpi_pci_disabled = 1; 113 acpi_pci_disabled = 1;
114 acpi_noirq_set(); 114 acpi_noirq_set();
115} 115}
116extern int acpi_irq_balance_set(char *str);
117 116
118/* routines for saving/restoring kernel state */ 117/* routines for saving/restoring kernel state */
119extern int acpi_save_state_mem(void); 118extern int acpi_save_state_mem(void);
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 1a30c0440c6b..ac302a2fa339 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -251,13 +251,6 @@ struct amd_iommu {
251 /* Pointer to PCI device of this IOMMU */ 251 /* Pointer to PCI device of this IOMMU */
252 struct pci_dev *dev; 252 struct pci_dev *dev;
253 253
254 /*
255 * Capability pointer. There could be more than one IOMMU per PCI
256 * device function if there are more than one AMD IOMMU capability
257 * pointers.
258 */
259 u16 cap_ptr;
260
261 /* physical address of MMIO space */ 254 /* physical address of MMIO space */
262 u64 mmio_phys; 255 u64 mmio_phys;
263 /* virtual address of MMIO space */ 256 /* virtual address of MMIO space */
@@ -266,6 +259,13 @@ struct amd_iommu {
266 /* capabilities of that IOMMU read from ACPI */ 259 /* capabilities of that IOMMU read from ACPI */
267 u32 cap; 260 u32 cap;
268 261
262 /*
263 * Capability pointer. There could be more than one IOMMU per PCI
264 * device function if there are more than one AMD IOMMU capability
265 * pointers.
266 */
267 u16 cap_ptr;
268
269 /* pci domain of this IOMMU */ 269 /* pci domain of this IOMMU */
270 u16 pci_seg; 270 u16 pci_seg;
271 271
@@ -284,19 +284,19 @@ struct amd_iommu {
284 /* size of command buffer */ 284 /* size of command buffer */
285 u32 cmd_buf_size; 285 u32 cmd_buf_size;
286 286
287 /* event buffer virtual address */
288 u8 *evt_buf;
289 /* size of event buffer */ 287 /* size of event buffer */
290 u32 evt_buf_size; 288 u32 evt_buf_size;
289 /* event buffer virtual address */
290 u8 *evt_buf;
291 /* MSI number for event interrupt */ 291 /* MSI number for event interrupt */
292 u16 evt_msi_num; 292 u16 evt_msi_num;
293 293
294 /* if one, we need to send a completion wait command */
295 int need_sync;
296
297 /* true if interrupts for this IOMMU are already enabled */ 294 /* true if interrupts for this IOMMU are already enabled */
298 bool int_enabled; 295 bool int_enabled;
299 296
297 /* if one, we need to send a completion wait command */
298 int need_sync;
299
300 /* default dma_ops domain for that IOMMU */ 300 /* default dma_ops domain for that IOMMU */
301 struct dma_ops_domain *default_dom; 301 struct dma_ops_domain *default_dom;
302}; 302};
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 7f225a4b2a26..097794ff6b79 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -71,15 +71,13 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
71/* Make sure we keep the same behaviour */ 71/* Make sure we keep the same behaviour */
72static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 72static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
73{ 73{
74#ifdef CONFIG_X86_32 74#ifdef CONFIG_X86_64
75 return 0;
76#else
77 struct dma_mapping_ops *ops = get_dma_ops(dev); 75 struct dma_mapping_ops *ops = get_dma_ops(dev);
78 if (ops->mapping_error) 76 if (ops->mapping_error)
79 return ops->mapping_error(dev, dma_addr); 77 return ops->mapping_error(dev, dma_addr);
80 78
81 return (dma_addr == bad_dma_address);
82#endif 79#endif
80 return (dma_addr == bad_dma_address);
83} 81}
84 82
85#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 83#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
diff --git a/arch/x86/include/asm/ds.h b/arch/x86/include/asm/ds.h
index 72c5a190bf48..a95008457ea4 100644
--- a/arch/x86/include/asm/ds.h
+++ b/arch/x86/include/asm/ds.h
@@ -23,12 +23,13 @@
23#ifndef _ASM_X86_DS_H 23#ifndef _ASM_X86_DS_H
24#define _ASM_X86_DS_H 24#define _ASM_X86_DS_H
25 25
26#ifdef CONFIG_X86_DS
27 26
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/init.h> 28#include <linux/init.h>
30 29
31 30
31#ifdef CONFIG_X86_DS
32
32struct task_struct; 33struct task_struct;
33 34
34/* 35/*
@@ -232,7 +233,8 @@ extern void ds_free(struct ds_context *context);
232 233
233#else /* CONFIG_X86_DS */ 234#else /* CONFIG_X86_DS */
234 235
235#define ds_init_intel(config) do {} while (0) 236struct cpuinfo_x86;
237static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
236 238
237#endif /* CONFIG_X86_DS */ 239#endif /* CONFIG_X86_DS */
238#endif /* _ASM_X86_DS_H */ 240#endif /* _ASM_X86_DS_H */
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
new file mode 100644
index 000000000000..c1f06289b14b
--- /dev/null
+++ b/arch/x86/include/asm/iomap.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright © 2008 Ingo Molnar
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17 */
18
19#include <linux/fs.h>
20#include <linux/mm.h>
21#include <linux/uaccess.h>
22#include <asm/cacheflush.h>
23#include <asm/pgtable.h>
24#include <asm/tlbflush.h>
25
26void *
27iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
28
29void
30iounmap_atomic(void *kvaddr, enum km_type type);
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index e4a552d44465..0b500c5b6446 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,7 +6,6 @@ extern void no_iommu_init(void);
6extern struct dma_mapping_ops nommu_dma_ops; 6extern struct dma_mapping_ops nommu_dma_ops;
7extern int force_iommu, no_iommu; 7extern int force_iommu, no_iommu;
8extern int iommu_detected; 8extern int iommu_detected;
9extern int dmar_disabled;
10 9
11extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); 10extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
12 11
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 485bdf059ffb..07f1af494ca5 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -34,10 +34,14 @@ static inline void get_memcfg_numa(void)
34 34
35extern int early_pfn_to_nid(unsigned long pfn); 35extern int early_pfn_to_nid(unsigned long pfn);
36 36
37extern void resume_map_numa_kva(pgd_t *pgd);
38
37#else /* !CONFIG_NUMA */ 39#else /* !CONFIG_NUMA */
38 40
39#define get_memcfg_numa get_memcfg_numa_flat 41#define get_memcfg_numa get_memcfg_numa_flat
40 42
43static inline void resume_map_numa_kva(pgd_t *pgd) {}
44
41#endif /* CONFIG_NUMA */ 45#endif /* CONFIG_NUMA */
42 46
43#ifdef CONFIG_DISCONTIGMEM 47#ifdef CONFIG_DISCONTIGMEM
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h
index 5b28995d664e..d02d936840a3 100644
--- a/arch/x86/include/asm/pci_64.h
+++ b/arch/x86/include/asm/pci_64.h
@@ -34,8 +34,6 @@ extern void pci_iommu_alloc(void);
34 */ 34 */
35#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) 35#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
36 36
37#if defined(CONFIG_GART_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
38
39#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ 37#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
40 dma_addr_t ADDR_NAME; 38 dma_addr_t ADDR_NAME;
41#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ 39#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
@@ -49,18 +47,6 @@ extern void pci_iommu_alloc(void);
49#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ 47#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
50 (((PTR)->LEN_NAME) = (VAL)) 48 (((PTR)->LEN_NAME) = (VAL))
51 49
52#else
53/* No IOMMU */
54
55#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
56#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
57#define pci_unmap_addr(PTR, ADDR_NAME) (0)
58#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
59#define pci_unmap_len(PTR, LEN_NAME) (0)
60#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
61
62#endif
63
64#endif /* __KERNEL__ */ 50#endif /* __KERNEL__ */
65 51
66#endif /* _ASM_X86_PCI_64_H */ 52#endif /* _ASM_X86_PCI_64_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index d1531c8480b7..eefb0594b058 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -271,8 +271,6 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
271extern int do_set_thread_area(struct task_struct *p, int idx, 271extern int do_set_thread_area(struct task_struct *p, int idx,
272 struct user_desc __user *info, int can_allocate); 272 struct user_desc __user *info, int can_allocate);
273 273
274#define __ARCH_WANT_COMPAT_SYS_PTRACE
275
276#endif /* __KERNEL__ */ 274#endif /* __KERNEL__ */
277 275
278#endif /* !__ASSEMBLY__ */ 276#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4850e4b02b61..ff386ff50ed7 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -239,7 +239,7 @@ struct pci_bus;
239void set_pci_bus_resources_arch_default(struct pci_bus *b); 239void set_pci_bus_resources_arch_default(struct pci_bus *b);
240 240
241#ifdef CONFIG_SMP 241#ifdef CONFIG_SMP
242#define mc_capable() (boot_cpu_data.x86_max_cores > 1) 242#define mc_capable() (cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
243#define smt_capable() (smp_num_siblings > 1) 243#define smt_capable() (smp_num_siblings > 1)
244#endif 244#endif
245 245
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 664f15280f14..f8cfd00db450 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -46,7 +46,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
46 return ret; 46 return ret;
47 case 10: 47 case 10:
48 __get_user_asm(*(u64 *)dst, (u64 __user *)src, 48 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
49 ret, "q", "", "=r", 16); 49 ret, "q", "", "=r", 10);
50 if (unlikely(ret)) 50 if (unlikely(ret))
51 return ret; 51 return ret;
52 __get_user_asm(*(u16 *)(8 + (char *)dst), 52 __get_user_asm(*(u16 *)(8 + (char *)dst),
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 834b2c1d89fb..d2e415e6666f 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -639,8 +639,8 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) 639__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
640#define __NR_timerfd_gettime 287 640#define __NR_timerfd_gettime 287
641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) 641__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
642#define __NR_paccept 288 642#define __NR_accept4 288
643__SYSCALL(__NR_paccept, sys_paccept) 643__SYSCALL(__NR_accept4, sys_accept4)
644#define __NR_signalfd4 289 644#define __NR_signalfd4 289
645__SYSCALL(__NR_signalfd4, sys_signalfd4) 645__SYSCALL(__NR_signalfd4, sys_signalfd4)
646#define __NR_eventfd2 290 646#define __NR_eventfd2 290
diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
index b7c0dea119fe..61e08c0a2907 100644
--- a/arch/x86/include/asm/vmi.h
+++ b/arch/x86/include/asm/vmi.h
@@ -223,9 +223,15 @@ struct pci_header {
223} __attribute__((packed)); 223} __attribute__((packed));
224 224
225/* Function prototypes for bootstrapping */ 225/* Function prototypes for bootstrapping */
226#ifdef CONFIG_VMI
226extern void vmi_init(void); 227extern void vmi_init(void);
228extern void vmi_activate(void);
227extern void vmi_bringup(void); 229extern void vmi_bringup(void);
228extern void vmi_apply_boot_page_allocations(void); 230#else
231static inline void vmi_init(void) {}
232static inline void vmi_activate(void) {}
233static inline void vmi_bringup(void) {}
234#endif
229 235
230/* State needed to start an application processor in an SMP system. */ 236/* State needed to start an application processor in an SMP system. */
231struct vmi_ap_state { 237struct vmi_ap_state {
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e489ff9cb3e2..b62a7667828e 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
41obj-y += process.o 41obj-y += process.o
42obj-y += i387.o xsave.o 42obj-y += i387.o xsave.o
43obj-y += ptrace.o 43obj-y += ptrace.o
44obj-y += ds.o 44obj-$(CONFIG_X86_DS) += ds.o
45obj-$(CONFIG_X86_32) += tls.o 45obj-$(CONFIG_X86_32) += tls.o
46obj-$(CONFIG_IA32_EMULATION) += tls.o 46obj-$(CONFIG_IA32_EMULATION) += tls.o
47obj-y += step.o 47obj-y += step.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8c1f76abae9e..4c51a2f8fd31 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1343,7 +1343,6 @@ static void __init acpi_process_madt(void)
1343 error = acpi_parse_madt_ioapic_entries(); 1343 error = acpi_parse_madt_ioapic_entries();
1344 if (!error) { 1344 if (!error) {
1345 acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; 1345 acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
1346 acpi_irq_balance_set(NULL);
1347 acpi_ioapic = 1; 1346 acpi_ioapic = 1;
1348 1347
1349 smp_found_config = 1; 1348 smp_found_config = 1;
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 331b318304eb..0a60d60ed036 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
187 187
188 spin_lock_irqsave(&iommu->lock, flags); 188 spin_lock_irqsave(&iommu->lock, flags);
189 ret = __iommu_queue_command(iommu, cmd); 189 ret = __iommu_queue_command(iommu, cmd);
190 if (!ret)
191 iommu->need_sync = 1;
190 spin_unlock_irqrestore(&iommu->lock, flags); 192 spin_unlock_irqrestore(&iommu->lock, flags);
191 193
192 return ret; 194 return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
210 cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; 212 cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
211 CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); 213 CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
212 214
213 iommu->need_sync = 0;
214
215 spin_lock_irqsave(&iommu->lock, flags); 215 spin_lock_irqsave(&iommu->lock, flags);
216 216
217 if (!iommu->need_sync)
218 goto out;
219
220 iommu->need_sync = 0;
221
217 ret = __iommu_queue_command(iommu, &cmd); 222 ret = __iommu_queue_command(iommu, &cmd);
218 223
219 if (ret) 224 if (ret)
@@ -230,8 +235,9 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
230 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; 235 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
231 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); 236 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
232 237
233 if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) 238 if (unlikely(i == EXIT_LOOP_COUNT))
234 printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); 239 panic("AMD IOMMU: Completion wait loop failed\n");
240
235out: 241out:
236 spin_unlock_irqrestore(&iommu->lock, flags); 242 spin_unlock_irqrestore(&iommu->lock, flags);
237 243
@@ -254,8 +260,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
254 260
255 ret = iommu_queue_command(iommu, &cmd); 261 ret = iommu_queue_command(iommu, &cmd);
256 262
257 iommu->need_sync = 1;
258
259 return ret; 263 return ret;
260} 264}
261 265
@@ -281,8 +285,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
281 285
282 ret = iommu_queue_command(iommu, &cmd); 286 ret = iommu_queue_command(iommu, &cmd);
283 287
284 iommu->need_sync = 1;
285
286 return ret; 288 return ret;
287} 289}
288 290
@@ -343,7 +345,7 @@ static int iommu_map(struct protection_domain *dom,
343 u64 __pte, *pte, *page; 345 u64 __pte, *pte, *page;
344 346
345 bus_addr = PAGE_ALIGN(bus_addr); 347 bus_addr = PAGE_ALIGN(bus_addr);
346 phys_addr = PAGE_ALIGN(bus_addr); 348 phys_addr = PAGE_ALIGN(phys_addr);
347 349
348 /* only support 512GB address spaces for now */ 350 /* only support 512GB address spaces for now */
349 if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK)) 351 if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -537,7 +539,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
537 address >>= PAGE_SHIFT; 539 address >>= PAGE_SHIFT;
538 iommu_area_free(dom->bitmap, address, pages); 540 iommu_area_free(dom->bitmap, address, pages);
539 541
540 if (address + pages >= dom->next_bit) 542 if (address >= dom->next_bit)
541 dom->need_flush = true; 543 dom->need_flush = true;
542} 544}
543 545
@@ -599,7 +601,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
599 continue; 601 continue;
600 602
601 p2 = IOMMU_PTE_PAGE(p1[i]); 603 p2 = IOMMU_PTE_PAGE(p1[i]);
602 for (j = 0; j < 512; ++i) { 604 for (j = 0; j < 512; ++j) {
603 if (!IOMMU_PTE_PRESENT(p2[j])) 605 if (!IOMMU_PTE_PRESENT(p2[j]))
604 continue; 606 continue;
605 p3 = IOMMU_PTE_PAGE(p2[j]); 607 p3 = IOMMU_PTE_PAGE(p2[j]);
@@ -762,8 +764,6 @@ static void set_device_domain(struct amd_iommu *iommu,
762 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 764 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
763 765
764 iommu_queue_inv_dev_entry(iommu, devid); 766 iommu_queue_inv_dev_entry(iommu, devid);
765
766 iommu->need_sync = 1;
767} 767}
768 768
769/***************************************************************************** 769/*****************************************************************************
@@ -858,6 +858,9 @@ static int get_device_resources(struct device *dev,
858 print_devid(_bdf, 1); 858 print_devid(_bdf, 1);
859 } 859 }
860 860
861 if (domain_for_device(_bdf) == NULL)
862 set_device_domain(*iommu, *domain, _bdf);
863
861 return 1; 864 return 1;
862} 865}
863 866
@@ -908,7 +911,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
908 if (address >= dom->aperture_size) 911 if (address >= dom->aperture_size)
909 return; 912 return;
910 913
911 WARN_ON(address & 0xfffULL || address > dom->aperture_size); 914 WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
912 915
913 pte = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)]; 916 pte = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
914 pte += IOMMU_PTE_L0_INDEX(address); 917 pte += IOMMU_PTE_L0_INDEX(address);
@@ -920,8 +923,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
920 923
921/* 924/*
922 * This function contains common code for mapping of a physically 925 * This function contains common code for mapping of a physically
923 * contiguous memory region into DMA address space. It is uses by all 926 * contiguous memory region into DMA address space. It is used by all
924 * mapping functions provided by this IOMMU driver. 927 * mapping functions provided with this IOMMU driver.
925 * Must be called with the domain lock held. 928 * Must be called with the domain lock held.
926 */ 929 */
927static dma_addr_t __map_single(struct device *dev, 930static dma_addr_t __map_single(struct device *dev,
@@ -981,7 +984,8 @@ static void __unmap_single(struct amd_iommu *iommu,
981 dma_addr_t i, start; 984 dma_addr_t i, start;
982 unsigned int pages; 985 unsigned int pages;
983 986
984 if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size)) 987 if ((dma_addr == bad_dma_address) ||
988 (dma_addr + size > dma_dom->aperture_size))
985 return; 989 return;
986 990
987 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 991 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -1031,8 +1035,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
1031 if (addr == bad_dma_address) 1035 if (addr == bad_dma_address)
1032 goto out; 1036 goto out;
1033 1037
1034 if (unlikely(iommu->need_sync)) 1038 iommu_completion_wait(iommu);
1035 iommu_completion_wait(iommu);
1036 1039
1037out: 1040out:
1038 spin_unlock_irqrestore(&domain->lock, flags); 1041 spin_unlock_irqrestore(&domain->lock, flags);
@@ -1060,8 +1063,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
1060 1063
1061 __unmap_single(iommu, domain->priv, dma_addr, size, dir); 1064 __unmap_single(iommu, domain->priv, dma_addr, size, dir);
1062 1065
1063 if (unlikely(iommu->need_sync)) 1066 iommu_completion_wait(iommu);
1064 iommu_completion_wait(iommu);
1065 1067
1066 spin_unlock_irqrestore(&domain->lock, flags); 1068 spin_unlock_irqrestore(&domain->lock, flags);
1067} 1069}
@@ -1127,8 +1129,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
1127 goto unmap; 1129 goto unmap;
1128 } 1130 }
1129 1131
1130 if (unlikely(iommu->need_sync)) 1132 iommu_completion_wait(iommu);
1131 iommu_completion_wait(iommu);
1132 1133
1133out: 1134out:
1134 spin_unlock_irqrestore(&domain->lock, flags); 1135 spin_unlock_irqrestore(&domain->lock, flags);
@@ -1173,8 +1174,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
1173 s->dma_address = s->dma_length = 0; 1174 s->dma_address = s->dma_length = 0;
1174 } 1175 }
1175 1176
1176 if (unlikely(iommu->need_sync)) 1177 iommu_completion_wait(iommu);
1177 iommu_completion_wait(iommu);
1178 1178
1179 spin_unlock_irqrestore(&domain->lock, flags); 1179 spin_unlock_irqrestore(&domain->lock, flags);
1180} 1180}
@@ -1225,8 +1225,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
1225 goto out; 1225 goto out;
1226 } 1226 }
1227 1227
1228 if (unlikely(iommu->need_sync)) 1228 iommu_completion_wait(iommu);
1229 iommu_completion_wait(iommu);
1230 1229
1231out: 1230out:
1232 spin_unlock_irqrestore(&domain->lock, flags); 1231 spin_unlock_irqrestore(&domain->lock, flags);
@@ -1257,8 +1256,7 @@ static void free_coherent(struct device *dev, size_t size,
1257 1256
1258 __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); 1257 __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
1259 1258
1260 if (unlikely(iommu->need_sync)) 1259 iommu_completion_wait(iommu);
1261 iommu_completion_wait(iommu);
1262 1260
1263 spin_unlock_irqrestore(&domain->lock, flags); 1261 spin_unlock_irqrestore(&domain->lock, flags);
1264 1262
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 0cdcda35a05f..c6cc22815d35 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -121,7 +121,7 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
121LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings 121LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
122 we find in ACPI */ 122 we find in ACPI */
123unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */ 123unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
124int amd_iommu_isolate; /* if 1, device isolation is enabled */ 124int amd_iommu_isolate = 1; /* if 1, device isolation is enabled */
125bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ 125bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
126 126
127LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 127LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
@@ -427,6 +427,10 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
427 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, 427 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
428 &entry, sizeof(entry)); 428 &entry, sizeof(entry));
429 429
430 /* set head and tail to zero manually */
431 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
432 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
433
430 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); 434 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
431 435
432 return cmd_buf; 436 return cmd_buf;
@@ -1074,7 +1078,8 @@ int __init amd_iommu_init(void)
1074 goto free; 1078 goto free;
1075 1079
1076 /* IOMMU rlookup table - find the IOMMU for a specific device */ 1080 /* IOMMU rlookup table - find the IOMMU for a specific device */
1077 amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL, 1081 amd_iommu_rlookup_table = (void *)__get_free_pages(
1082 GFP_KERNEL | __GFP_ZERO,
1078 get_order(rlookup_table_size)); 1083 get_order(rlookup_table_size));
1079 if (amd_iommu_rlookup_table == NULL) 1084 if (amd_iommu_rlookup_table == NULL)
1080 goto free; 1085 goto free;
@@ -1213,7 +1218,9 @@ static int __init parse_amd_iommu_options(char *str)
1213 for (; *str; ++str) { 1218 for (; *str; ++str) {
1214 if (strncmp(str, "isolate", 7) == 0) 1219 if (strncmp(str, "isolate", 7) == 0)
1215 amd_iommu_isolate = 1; 1220 amd_iommu_isolate = 1;
1216 if (strncmp(str, "fullflush", 11) == 0) 1221 if (strncmp(str, "share", 5) == 0)
1222 amd_iommu_isolate = 0;
1223 if (strncmp(str, "fullflush", 9) == 0)
1217 amd_iommu_unmap_flush = true; 1224 amd_iommu_unmap_flush = true;
1218 } 1225 }
1219 1226
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 04a7f960bbc0..16f94879b525 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1315,7 +1315,7 @@ void enable_x2apic(void)
1315 } 1315 }
1316} 1316}
1317 1317
1318void enable_IR_x2apic(void) 1318void __init enable_IR_x2apic(void)
1319{ 1319{
1320#ifdef CONFIG_INTR_REMAP 1320#ifdef CONFIG_INTR_REMAP
1321 int ret; 1321 int ret;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index d3dcd58b87cd..7f05f44b97e9 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -115,9 +115,20 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
115 u32 i = 0; 115 u32 i = 0;
116 116
117 if (cpu_family == CPU_HW_PSTATE) { 117 if (cpu_family == CPU_HW_PSTATE) {
118 rdmsr(MSR_PSTATE_STATUS, lo, hi); 118 if (data->currpstate == HW_PSTATE_INVALID) {
119 i = lo & HW_PSTATE_MASK; 119 /* read (initial) hw pstate if not yet set */
120 data->currpstate = i; 120 rdmsr(MSR_PSTATE_STATUS, lo, hi);
121 i = lo & HW_PSTATE_MASK;
122
123 /*
124 * a workaround for family 11h erratum 311 might cause
125 * an "out-of-range Pstate if the core is in Pstate-0
126 */
127 if (i >= data->numps)
128 data->currpstate = HW_PSTATE_0;
129 else
130 data->currpstate = i;
131 }
121 return 0; 132 return 0;
122 } 133 }
123 do { 134 do {
@@ -1121,6 +1132,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1121 } 1132 }
1122 1133
1123 data->cpu = pol->cpu; 1134 data->cpu = pol->cpu;
1135 data->currpstate = HW_PSTATE_INVALID;
1124 1136
1125 if (powernow_k8_cpu_init_acpi(data)) { 1137 if (powernow_k8_cpu_init_acpi(data)) {
1126 /* 1138 /*
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index ab48cfed4d96..65cfb5d7f77f 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -5,6 +5,19 @@
5 * http://www.gnu.org/licenses/gpl.html 5 * http://www.gnu.org/licenses/gpl.html
6 */ 6 */
7 7
8
9enum pstate {
10 HW_PSTATE_INVALID = 0xff,
11 HW_PSTATE_0 = 0,
12 HW_PSTATE_1 = 1,
13 HW_PSTATE_2 = 2,
14 HW_PSTATE_3 = 3,
15 HW_PSTATE_4 = 4,
16 HW_PSTATE_5 = 5,
17 HW_PSTATE_6 = 6,
18 HW_PSTATE_7 = 7,
19};
20
8struct powernow_k8_data { 21struct powernow_k8_data {
9 unsigned int cpu; 22 unsigned int cpu;
10 23
@@ -23,7 +36,9 @@ struct powernow_k8_data {
23 u32 exttype; /* extended interface = 1 */ 36 u32 exttype; /* extended interface = 1 */
24 37
25 /* keep track of the current fid / vid or pstate */ 38 /* keep track of the current fid / vid or pstate */
26 u32 currvid, currfid, currpstate; 39 u32 currvid;
40 u32 currfid;
41 enum pstate currpstate;
27 42
28 /* the powernow_table includes all frequency and vid/fid pairings: 43 /* the powernow_table includes all frequency and vid/fid pairings:
29 * fid are the lower 8 bits of the index, vid are the upper 8 bits. 44 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 4b031a4ac856..1c838032fd37 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -510,12 +510,9 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
510 */ 510 */
511void __cpuinit mcheck_init(struct cpuinfo_x86 *c) 511void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
512{ 512{
513 static cpumask_t mce_cpus = CPU_MASK_NONE;
514
515 mce_cpu_quirks(c); 513 mce_cpu_quirks(c);
516 514
517 if (mce_dont_init || 515 if (mce_dont_init ||
518 cpu_test_and_set(smp_processor_id(), mce_cpus) ||
519 !mce_available(c)) 516 !mce_available(c))
520 return; 517 return;
521 518
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 2b69994fd3a8..a2d1176c38ee 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -21,8 +21,6 @@
21 */ 21 */
22 22
23 23
24#ifdef CONFIG_X86_DS
25
26#include <asm/ds.h> 24#include <asm/ds.h>
27 25
28#include <linux/errno.h> 26#include <linux/errno.h>
@@ -211,14 +209,15 @@ static DEFINE_PER_CPU(struct ds_context *, system_context);
211static inline struct ds_context *ds_get_context(struct task_struct *task) 209static inline struct ds_context *ds_get_context(struct task_struct *task)
212{ 210{
213 struct ds_context *context; 211 struct ds_context *context;
212 unsigned long irq;
214 213
215 spin_lock(&ds_lock); 214 spin_lock_irqsave(&ds_lock, irq);
216 215
217 context = (task ? task->thread.ds_ctx : this_system_context); 216 context = (task ? task->thread.ds_ctx : this_system_context);
218 if (context) 217 if (context)
219 context->count++; 218 context->count++;
220 219
221 spin_unlock(&ds_lock); 220 spin_unlock_irqrestore(&ds_lock, irq);
222 221
223 return context; 222 return context;
224} 223}
@@ -226,18 +225,16 @@ static inline struct ds_context *ds_get_context(struct task_struct *task)
226/* 225/*
227 * Same as ds_get_context, but allocates the context and it's DS 226 * Same as ds_get_context, but allocates the context and it's DS
228 * structure, if necessary; returns NULL; if out of memory. 227 * structure, if necessary; returns NULL; if out of memory.
229 *
230 * pre: requires ds_lock to be held
231 */ 228 */
232static inline struct ds_context *ds_alloc_context(struct task_struct *task) 229static inline struct ds_context *ds_alloc_context(struct task_struct *task)
233{ 230{
234 struct ds_context **p_context = 231 struct ds_context **p_context =
235 (task ? &task->thread.ds_ctx : &this_system_context); 232 (task ? &task->thread.ds_ctx : &this_system_context);
236 struct ds_context *context = *p_context; 233 struct ds_context *context = *p_context;
234 unsigned long irq;
237 235
238 if (!context) { 236 if (!context) {
239 context = kzalloc(sizeof(*context), GFP_KERNEL); 237 context = kzalloc(sizeof(*context), GFP_KERNEL);
240
241 if (!context) 238 if (!context)
242 return NULL; 239 return NULL;
243 240
@@ -247,18 +244,27 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
247 return NULL; 244 return NULL;
248 } 245 }
249 246
250 *p_context = context; 247 spin_lock_irqsave(&ds_lock, irq);
251 248
252 context->this = p_context; 249 if (*p_context) {
253 context->task = task; 250 kfree(context->ds);
251 kfree(context);
252
253 context = *p_context;
254 } else {
255 *p_context = context;
254 256
255 if (task) 257 context->this = p_context;
256 set_tsk_thread_flag(task, TIF_DS_AREA_MSR); 258 context->task = task;
257 259
258 if (!task || (task == current)) 260 if (task)
259 wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0); 261 set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
260 262
261 get_tracer(task); 263 if (!task || (task == current))
264 wrmsrl(MSR_IA32_DS_AREA,
265 (unsigned long)context->ds);
266 }
267 spin_unlock_irqrestore(&ds_lock, irq);
262 } 268 }
263 269
264 context->count++; 270 context->count++;
@@ -272,10 +278,12 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
272 */ 278 */
273static inline void ds_put_context(struct ds_context *context) 279static inline void ds_put_context(struct ds_context *context)
274{ 280{
281 unsigned long irq;
282
275 if (!context) 283 if (!context)
276 return; 284 return;
277 285
278 spin_lock(&ds_lock); 286 spin_lock_irqsave(&ds_lock, irq);
279 287
280 if (--context->count) 288 if (--context->count)
281 goto out; 289 goto out;
@@ -297,7 +305,7 @@ static inline void ds_put_context(struct ds_context *context)
297 kfree(context->ds); 305 kfree(context->ds);
298 kfree(context); 306 kfree(context);
299 out: 307 out:
300 spin_unlock(&ds_lock); 308 spin_unlock_irqrestore(&ds_lock, irq);
301} 309}
302 310
303 311
@@ -368,6 +376,7 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
368 struct ds_context *context; 376 struct ds_context *context;
369 unsigned long buffer, adj; 377 unsigned long buffer, adj;
370 const unsigned long alignment = (1 << 3); 378 const unsigned long alignment = (1 << 3);
379 unsigned long irq;
371 int error = 0; 380 int error = 0;
372 381
373 if (!ds_cfg.sizeof_ds) 382 if (!ds_cfg.sizeof_ds)
@@ -382,25 +391,27 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
382 return -EOPNOTSUPP; 391 return -EOPNOTSUPP;
383 392
384 393
385 spin_lock(&ds_lock);
386
387 if (!check_tracer(task))
388 return -EPERM;
389
390 error = -ENOMEM;
391 context = ds_alloc_context(task); 394 context = ds_alloc_context(task);
392 if (!context) 395 if (!context)
396 return -ENOMEM;
397
398 spin_lock_irqsave(&ds_lock, irq);
399
400 error = -EPERM;
401 if (!check_tracer(task))
393 goto out_unlock; 402 goto out_unlock;
394 403
404 get_tracer(task);
405
395 error = -EALREADY; 406 error = -EALREADY;
396 if (context->owner[qual] == current) 407 if (context->owner[qual] == current)
397 goto out_unlock; 408 goto out_put_tracer;
398 error = -EPERM; 409 error = -EPERM;
399 if (context->owner[qual] != NULL) 410 if (context->owner[qual] != NULL)
400 goto out_unlock; 411 goto out_put_tracer;
401 context->owner[qual] = current; 412 context->owner[qual] = current;
402 413
403 spin_unlock(&ds_lock); 414 spin_unlock_irqrestore(&ds_lock, irq);
404 415
405 416
406 error = -ENOMEM; 417 error = -ENOMEM;
@@ -448,10 +459,17 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
448 out_release: 459 out_release:
449 context->owner[qual] = NULL; 460 context->owner[qual] = NULL;
450 ds_put_context(context); 461 ds_put_context(context);
462 put_tracer(task);
463 return error;
464
465 out_put_tracer:
466 spin_unlock_irqrestore(&ds_lock, irq);
467 ds_put_context(context);
468 put_tracer(task);
451 return error; 469 return error;
452 470
453 out_unlock: 471 out_unlock:
454 spin_unlock(&ds_lock); 472 spin_unlock_irqrestore(&ds_lock, irq);
455 ds_put_context(context); 473 ds_put_context(context);
456 return error; 474 return error;
457} 475}
@@ -801,13 +819,21 @@ static const struct ds_configuration ds_cfg_var = {
801 .sizeof_ds = sizeof(long) * 12, 819 .sizeof_ds = sizeof(long) * 12,
802 .sizeof_field = sizeof(long), 820 .sizeof_field = sizeof(long),
803 .sizeof_rec[ds_bts] = sizeof(long) * 3, 821 .sizeof_rec[ds_bts] = sizeof(long) * 3,
822#ifdef __i386__
804 .sizeof_rec[ds_pebs] = sizeof(long) * 10 823 .sizeof_rec[ds_pebs] = sizeof(long) * 10
824#else
825 .sizeof_rec[ds_pebs] = sizeof(long) * 18
826#endif
805}; 827};
806static const struct ds_configuration ds_cfg_64 = { 828static const struct ds_configuration ds_cfg_64 = {
807 .sizeof_ds = 8 * 12, 829 .sizeof_ds = 8 * 12,
808 .sizeof_field = 8, 830 .sizeof_field = 8,
809 .sizeof_rec[ds_bts] = 8 * 3, 831 .sizeof_rec[ds_bts] = 8 * 3,
832#ifdef __i386__
810 .sizeof_rec[ds_pebs] = 8 * 10 833 .sizeof_rec[ds_pebs] = 8 * 10
834#else
835 .sizeof_rec[ds_pebs] = 8 * 18
836#endif
811}; 837};
812 838
813static inline void 839static inline void
@@ -861,4 +887,3 @@ void ds_free(struct ds_context *context)
861 while (leftovers--) 887 while (leftovers--)
862 ds_put_context(context); 888 ds_put_context(context);
863} 889}
864#endif /* CONFIG_X86_DS */
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 3ce029ffaa55..1b894b72c0f5 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -188,20 +188,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
188} 188}
189#endif 189#endif
190 190
191#ifdef CONFIG_DMAR
192static void __init intel_g33_dmar(int num, int slot, int func)
193{
194 struct acpi_table_header *dmar_tbl;
195 acpi_status status;
196
197 status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
198 if (ACPI_SUCCESS(status)) {
199 printk(KERN_INFO "BIOS BUG: DMAR advertised on Intel G31/G33 chipset -- ignoring\n");
200 dmar_disabled = 1;
201 }
202}
203#endif
204
205#define QFLAG_APPLY_ONCE 0x1 191#define QFLAG_APPLY_ONCE 0x1
206#define QFLAG_APPLIED 0x2 192#define QFLAG_APPLIED 0x2
207#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) 193#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -225,10 +211,6 @@ static struct chipset early_qrk[] __initdata = {
225 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs }, 211 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
226 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, 212 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
227 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, 213 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
228#ifdef CONFIG_DMAR
229 { PCI_VENDOR_ID_INTEL, 0x29c0,
230 PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, intel_g33_dmar },
231#endif
232 {} 214 {}
233}; 215};
234 216
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
index f454c78fcef6..0aa2c443d600 100644
--- a/arch/x86/kernel/es7000_32.c
+++ b/arch/x86/kernel/es7000_32.c
@@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
250{ 250{
251 struct acpi_table_header *header = NULL; 251 struct acpi_table_header *header = NULL;
252 int i = 0; 252 int i = 0;
253 acpi_size tbl_size;
254 253
255 while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) { 254 while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
256 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) { 255 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
257 struct oem_table *t = (struct oem_table *)header; 256 struct oem_table *t = (struct oem_table *)header;
258 257
259 oem_addrX = t->OEMTableAddr; 258 oem_addrX = t->OEMTableAddr;
260 oem_size = t->OEMTableSize; 259 oem_size = t->OEMTableSize;
261 early_acpi_os_unmap_memory(header, tbl_size);
262 260
263 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, 261 *oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
264 oem_size); 262 oem_size);
265 return 0; 263 return 0;
266 } 264 }
267 early_acpi_os_unmap_memory(header, tbl_size);
268 } 265 }
269 return -1; 266 return -1;
270} 267}
271 268
272void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) 269void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
273{ 270{
274 if (!oem_addr)
275 return;
276
277 __acpi_unmap_table((char *)oem_addr, oem_size);
278} 271}
279#endif 272#endif
280 273
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 77017e834cf7..a1f6ed5e1a05 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -322,7 +322,7 @@ static int hpet_next_event(unsigned long delta,
322 * what we wrote hit the chip before we compare it to the 322 * what we wrote hit the chip before we compare it to the
323 * counter. 323 * counter.
324 */ 324 */
325 WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt); 325 WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
326 326
327 return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; 327 return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
328} 328}
@@ -445,7 +445,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
445{ 445{
446 446
447 if (request_irq(dev->irq, hpet_interrupt_handler, 447 if (request_irq(dev->irq, hpet_interrupt_handler,
448 IRQF_SHARED|IRQF_NOBALANCING, dev->name, dev)) 448 IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
449 return -1; 449 return -1;
450 450
451 disable_irq(dev->irq); 451 disable_irq(dev->irq);
@@ -811,7 +811,7 @@ int __init hpet_enable(void)
811 811
812out_nohpet: 812out_nohpet:
813 hpet_clear_mapping(); 813 hpet_clear_mapping();
814 boot_hpet_disable = 1; 814 hpet_address = 0;
815 return 0; 815 return 0;
816} 816}
817 817
@@ -834,10 +834,11 @@ static __init int hpet_late_init(void)
834 834
835 hpet_address = force_hpet_address; 835 hpet_address = force_hpet_address;
836 hpet_enable(); 836 hpet_enable();
837 if (!hpet_virt_address)
838 return -ENODEV;
839 } 837 }
840 838
839 if (!hpet_virt_address)
840 return -ENODEV;
841
841 hpet_reserve_platform_timers(hpet_readl(HPET_ID)); 842 hpet_reserve_platform_timers(hpet_readl(HPET_ID));
842 843
843 for_each_online_cpu(cpu) { 844 for_each_online_cpu(cpu) {
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 1f20608d4ca8..b0f61f0dcd0a 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -58,7 +58,7 @@ void __cpuinit mxcsr_feature_mask_init(void)
58 stts(); 58 stts();
59} 59}
60 60
61void __init init_thread_xstate(void) 61void __cpuinit init_thread_xstate(void)
62{ 62{
63 if (!HAVE_HWFP) { 63 if (!HAVE_HWFP) {
64 xstate_size = sizeof(struct i387_soft_struct); 64 xstate_size = sizeof(struct i387_soft_struct);
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 7a3f2028e2eb..9043251210fb 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -1140,6 +1140,20 @@ static void __clear_irq_vector(int irq)
1140 1140
1141 cfg->vector = 0; 1141 cfg->vector = 0;
1142 cpus_clear(cfg->domain); 1142 cpus_clear(cfg->domain);
1143
1144 if (likely(!cfg->move_in_progress))
1145 return;
1146 cpus_and(mask, cfg->old_domain, cpu_online_map);
1147 for_each_cpu_mask_nr(cpu, mask) {
1148 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1149 vector++) {
1150 if (per_cpu(vector_irq, cpu)[vector] != irq)
1151 continue;
1152 per_cpu(vector_irq, cpu)[vector] = -1;
1153 break;
1154 }
1155 }
1156 cfg->move_in_progress = 0;
1143} 1157}
1144 1158
1145void __setup_vector_irq(int cpu) 1159void __setup_vector_irq(int cpu)
@@ -3594,27 +3608,7 @@ int __init io_apic_get_redir_entries (int ioapic)
3594 3608
3595int __init probe_nr_irqs(void) 3609int __init probe_nr_irqs(void)
3596{ 3610{
3597 int idx; 3611 return NR_IRQS;
3598 int nr = 0;
3599#ifndef CONFIG_XEN
3600 int nr_min = 32;
3601#else
3602 int nr_min = NR_IRQS;
3603#endif
3604
3605 for (idx = 0; idx < nr_ioapics; idx++)
3606 nr += io_apic_get_redir_entries(idx) + 1;
3607
3608 /* double it for hotplug and msi and nmi */
3609 nr <<= 1;
3610
3611 /* something wrong ? */
3612 if (nr < nr_min)
3613 nr = nr_min;
3614 if (WARN_ON(nr > NR_IRQS))
3615 nr = NR_IRQS;
3616
3617 return nr;
3618} 3612}
3619 3613
3620/* -------------------------------------------------------------------------- 3614/* --------------------------------------------------------------------------
@@ -3761,7 +3755,9 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3761void __init setup_ioapic_dest(void) 3755void __init setup_ioapic_dest(void)
3762{ 3756{
3763 int pin, ioapic, irq, irq_entry; 3757 int pin, ioapic, irq, irq_entry;
3758 struct irq_desc *desc;
3764 struct irq_cfg *cfg; 3759 struct irq_cfg *cfg;
3760 cpumask_t mask;
3765 3761
3766 if (skip_ioapic_setup == 1) 3762 if (skip_ioapic_setup == 1)
3767 return; 3763 return;
@@ -3778,16 +3774,30 @@ void __init setup_ioapic_dest(void)
3778 * cpu is online. 3774 * cpu is online.
3779 */ 3775 */
3780 cfg = irq_cfg(irq); 3776 cfg = irq_cfg(irq);
3781 if (!cfg->vector) 3777 if (!cfg->vector) {
3782 setup_IO_APIC_irq(ioapic, pin, irq, 3778 setup_IO_APIC_irq(ioapic, pin, irq,
3783 irq_trigger(irq_entry), 3779 irq_trigger(irq_entry),
3784 irq_polarity(irq_entry)); 3780 irq_polarity(irq_entry));
3781 continue;
3782
3783 }
3784
3785 /*
3786 * Honour affinities which have been set in early boot
3787 */
3788 desc = irq_to_desc(irq);
3789 if (desc->status &
3790 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
3791 mask = desc->affinity;
3792 else
3793 mask = TARGET_CPUS;
3794
3785#ifdef CONFIG_INTR_REMAP 3795#ifdef CONFIG_INTR_REMAP
3786 else if (intr_remapping_enabled) 3796 if (intr_remapping_enabled)
3787 set_ir_ioapic_affinity_irq(irq, TARGET_CPUS); 3797 set_ir_ioapic_affinity_irq(irq, mask);
3788#endif
3789 else 3798 else
3790 set_ioapic_affinity_irq(irq, TARGET_CPUS); 3799#endif
3800 set_ioapic_affinity_irq(irq, mask);
3791 } 3801 }
3792 3802
3793 } 3803 }
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 774ac4991568..e169ae9b6a62 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt)
128} 128}
129 129
130#ifdef CONFIG_X86_LOCAL_APIC 130#ifdef CONFIG_X86_LOCAL_APIC
131static void kvm_setup_secondary_clock(void) 131static void __cpuinit kvm_setup_secondary_clock(void)
132{ 132{
133 /* 133 /*
134 * Now that the first cpu already had this clocksource initialized, 134 * Now that the first cpu already had this clocksource initialized,
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 82fb2809ce32..c4b5b24e0217 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -272,13 +272,18 @@ static struct attribute_group mc_attr_group = {
272 .name = "microcode", 272 .name = "microcode",
273}; 273};
274 274
275static void microcode_fini_cpu(int cpu) 275static void __microcode_fini_cpu(int cpu)
276{ 276{
277 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 277 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
278 278
279 mutex_lock(&microcode_mutex);
280 microcode_ops->microcode_fini_cpu(cpu); 279 microcode_ops->microcode_fini_cpu(cpu);
281 uci->valid = 0; 280 uci->valid = 0;
281}
282
283static void microcode_fini_cpu(int cpu)
284{
285 mutex_lock(&microcode_mutex);
286 __microcode_fini_cpu(cpu);
282 mutex_unlock(&microcode_mutex); 287 mutex_unlock(&microcode_mutex);
283} 288}
284 289
@@ -306,12 +311,16 @@ static int microcode_resume_cpu(int cpu)
306 * to this cpu (a bit of paranoia): 311 * to this cpu (a bit of paranoia):
307 */ 312 */
308 if (microcode_ops->collect_cpu_info(cpu, &nsig)) { 313 if (microcode_ops->collect_cpu_info(cpu, &nsig)) {
309 microcode_fini_cpu(cpu); 314 __microcode_fini_cpu(cpu);
315 printk(KERN_ERR "failed to collect_cpu_info for resuming cpu #%d\n",
316 cpu);
310 return -1; 317 return -1;
311 } 318 }
312 319
313 if (memcmp(&nsig, &uci->cpu_sig, sizeof(nsig))) { 320 if ((nsig.sig != uci->cpu_sig.sig) || (nsig.pf != uci->cpu_sig.pf)) {
314 microcode_fini_cpu(cpu); 321 __microcode_fini_cpu(cpu);
322 printk(KERN_ERR "cached ucode doesn't match the resuming cpu #%d\n",
323 cpu);
315 /* Should we look for a new ucode here? */ 324 /* Should we look for a new ucode here? */
316 return 1; 325 return 1;
317 } 326 }
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 622dc4a21784..a8e62792d171 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -155,6 +155,7 @@ static DEFINE_SPINLOCK(microcode_update_lock);
155static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) 155static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
156{ 156{
157 struct cpuinfo_x86 *c = &cpu_data(cpu_num); 157 struct cpuinfo_x86 *c = &cpu_data(cpu_num);
158 unsigned long flags;
158 unsigned int val[2]; 159 unsigned int val[2];
159 160
160 memset(csig, 0, sizeof(*csig)); 161 memset(csig, 0, sizeof(*csig));
@@ -174,11 +175,16 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
174 csig->pf = 1 << ((val[1] >> 18) & 7); 175 csig->pf = 1 << ((val[1] >> 18) & 7);
175 } 176 }
176 177
178 /* serialize access to the physical write to MSR 0x79 */
179 spin_lock_irqsave(&microcode_update_lock, flags);
180
177 wrmsr(MSR_IA32_UCODE_REV, 0, 0); 181 wrmsr(MSR_IA32_UCODE_REV, 0, 0);
178 /* see notes above for revision 1.07. Apparent chip bug */ 182 /* see notes above for revision 1.07. Apparent chip bug */
179 sync_core(); 183 sync_core();
180 /* get the current revision from MSR 0x8B */ 184 /* get the current revision from MSR 0x8B */
181 rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev); 185 rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
186 spin_unlock_irqrestore(&microcode_update_lock, flags);
187
182 pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n", 188 pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
183 csig->sig, csig->pf, csig->rev); 189 csig->sig, csig->pf, csig->rev);
184 190
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index f98f4e1dba09..0f4c1fd5a1f4 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
604 printk(KERN_INFO "Using ACPI for processor (LAPIC) " 604 printk(KERN_INFO "Using ACPI for processor (LAPIC) "
605 "configuration information\n"); 605 "configuration information\n");
606 606
607 if (!mpf)
608 return;
609
607 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", 610 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
608 mpf->mpf_specification); 611 mpf->mpf_specification);
609#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) 612#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 0e9f1982b1dd..95777b0faa73 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -7,7 +7,8 @@
7 7
8#include <asm/paravirt.h> 8#include <asm/paravirt.h>
9 9
10static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags) 10static inline void
11default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
11{ 12{
12 __raw_spin_lock(lock); 13 __raw_spin_lock(lock);
13} 14}
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index e1e731d78f38..d28bbdc35e4e 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1567,7 +1567,7 @@ static int __init calgary_parse_options(char *p)
1567 ++p; 1567 ++p;
1568 if (*p == '\0') 1568 if (*p == '\0')
1569 break; 1569 break;
1570 bridge = simple_strtol(p, &endp, 0); 1570 bridge = simple_strtoul(p, &endp, 0);
1571 if (p == endp) 1571 if (p == endp)
1572 break; 1572 break;
1573 1573
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index a42b02b4df68..a35eaa379ff6 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)
123 123
124 spin_lock_irqsave(&iommu_bitmap_lock, flags); 124 spin_lock_irqsave(&iommu_bitmap_lock, flags);
125 iommu_area_free(iommu_gart_bitmap, offset, size); 125 iommu_area_free(iommu_gart_bitmap, offset, size);
126 if (offset >= next_bit)
127 next_bit = offset + size;
126 spin_unlock_irqrestore(&iommu_bitmap_lock, flags); 128 spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
127} 129}
128 130
@@ -743,10 +745,8 @@ void __init gart_iommu_init(void)
743 unsigned long scratch; 745 unsigned long scratch;
744 long i; 746 long i;
745 747
746 if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) { 748 if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
747 printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
748 return; 749 return;
749 }
750 750
751#ifndef CONFIG_AGP_AMD64 751#ifndef CONFIG_AGP_AMD64
752 no_agp = 1; 752 no_agp = 1;
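The free_iommu() hunk advances the allocator's next_bit scan hint whenever a region at or beyond the hint is freed. This appears intended to keep freshly freed GART entries from being handed out again before the next wrap-around of the scan, which is the point at which the driver flushes the GART IO-TLB. A toy allocator showing the same hint/flush interplay (simplified, not the GART code):

#include <stdbool.h>

#define DEMO_ENTRIES 16

static bool demo_used[DEMO_ENTRIES];    /* toy "bitmap": one flag per entry  */
static unsigned int demo_next_bit;      /* scan hint, as in the GART driver  */
static bool demo_need_flush;            /* wrap happened: flush before reuse */

static int demo_alloc(void)
{
        unsigned int i;

        for (i = demo_next_bit; i < DEMO_ENTRIES; i++)
                if (!demo_used[i])
                        goto found;
        demo_need_flush = true;         /* wrapped: stale IO-TLB entries must go */
        for (i = 0; i < DEMO_ENTRIES && i < demo_next_bit; i++)
                if (!demo_used[i])
                        goto found;
        return -1;                      /* no free entry */
found:
        demo_used[i] = true;
        demo_next_bit = i + 1;
        return i;
}

static void demo_free(unsigned int offset)
{
        demo_used[offset] = false;
        if (offset >= demo_next_bit)        /* freed at/after the hint:       */
                demo_next_bit = offset + 1; /* reuse only after the next wrap */
}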
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 67465ed89310..309949e9e1c1 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -168,6 +168,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
168 ich_force_enable_hpet); 168 ich_force_enable_hpet);
169DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, 169DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
170 ich_force_enable_hpet); 170 ich_force_enable_hpet);
171DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
172 ich_force_enable_hpet);
171DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, 173DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
172 ich_force_enable_hpet); 174 ich_force_enable_hpet);
173 175
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 724adfc63cb9..cc5a2545dd41 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -169,6 +169,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
169 DMI_MATCH(DMI_BOARD_NAME, "0KW626"), 169 DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
170 }, 170 },
171 }, 171 },
172 { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
173 .callback = set_bios_reboot,
174 .ident = "Dell OptiPlex 330",
175 .matches = {
176 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
177 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
178 DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
179 },
180 },
172 { /* Handle problems with rebooting on Dell 2400's */ 181 { /* Handle problems with rebooting on Dell 2400's */
173 .callback = set_bios_reboot, 182 .callback = set_bios_reboot,
174 .ident = "Dell PowerEdge 2400", 183 .ident = "Dell PowerEdge 2400",
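The reboot.c hunk adds one more entry to reboot_dmi_table, forcing the BIOS reboot path on the OptiPlex 330 with the 0KP561 board by matching its DMI vendor, product and board strings. Further quirks of this kind follow the same shape; a hypothetical (made-up) entry would look like:

#include <linux/dmi.h>

/* Hypothetical quirk table in the same shape as reboot_dmi_table; the
 * "Example Inc." / "0XYZ99" strings are made up, not from this patch. */
static struct dmi_system_id __initdata example_reboot_quirks[] = {
        {       /* Handle reboot problems on an imaginary Example Box */
                .callback = set_bios_reboot,    /* fall back to the BIOS method */
                .ident = "Example Box",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Example Box"),
                        DMI_MATCH(DMI_BOARD_NAME, "0XYZ99"),
                },
        },
        { }     /* terminating empty entry */
};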
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0fa6790c1dd3..bdec76e55594 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -764,7 +764,7 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
764 .callback = dmi_low_memory_corruption, 764 .callback = dmi_low_memory_corruption,
765 .ident = "Phoenix BIOS", 765 .ident = "Phoenix BIOS",
766 .matches = { 766 .matches = {
767 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), 767 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
768 }, 768 },
769 }, 769 },
770#endif 770#endif
@@ -794,6 +794,9 @@ void __init setup_arch(char **cmdline_p)
794 printk(KERN_INFO "Command line: %s\n", boot_command_line); 794 printk(KERN_INFO "Command line: %s\n", boot_command_line);
795#endif 795#endif
796 796
797 /* VMI may relocate the fixmap; do this before touching ioremap area */
798 vmi_init();
799
797 early_cpu_init(); 800 early_cpu_init();
798 early_ioremap_init(); 801 early_ioremap_init();
799 802
@@ -880,13 +883,8 @@ void __init setup_arch(char **cmdline_p)
880 check_efer(); 883 check_efer();
881#endif 884#endif
882 885
883#if defined(CONFIG_VMI) && defined(CONFIG_X86_32) 886 /* Must be before kernel pagetables are setup */
884 /* 887 vmi_activate();
885 * Must be before kernel pagetables are setup
886 * or fixmap area is touched.
887 */
888 vmi_init();
889#endif
890 888
891 /* after early param, so could get panic from serial */ 889 /* after early param, so could get panic from serial */
892 reserve_early_setup_data(); 890 reserve_early_setup_data();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7b1093397319..f71f96fc9e62 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -294,9 +294,7 @@ static void __cpuinit start_secondary(void *unused)
294 * fragile that we want to limit the things done here to the 294 * fragile that we want to limit the things done here to the
295 * most necessary things. 295 * most necessary things.
296 */ 296 */
297#ifdef CONFIG_VMI
298 vmi_bringup(); 297 vmi_bringup();
299#endif
300 cpu_init(); 298 cpu_init();
301 preempt_disable(); 299 preempt_disable();
302 smp_callin(); 300 smp_callin();
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9ffb01c31c40..1c0dfbca87c1 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
46 cycles_t start, now, prev, end; 46 cycles_t start, now, prev, end;
47 int i; 47 int i;
48 48
49 rdtsc_barrier();
49 start = get_cycles(); 50 start = get_cycles();
51 rdtsc_barrier();
50 /* 52 /*
51 * The measurement runs for 20 msecs: 53 * The measurement runs for 20 msecs:
52 */ 54 */
@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
61 */ 63 */
62 __raw_spin_lock(&sync_lock); 64 __raw_spin_lock(&sync_lock);
63 prev = last_tsc; 65 prev = last_tsc;
66 rdtsc_barrier();
64 now = get_cycles(); 67 now = get_cycles();
68 rdtsc_barrier();
65 last_tsc = now; 69 last_tsc = now;
66 __raw_spin_unlock(&sync_lock); 70 __raw_spin_unlock(&sync_lock);
67 71
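check_tsc_warp() now brackets each get_cycles() with rdtsc_barrier(), so the RDTSC instruction can neither be executed speculatively ahead of earlier operations nor overlapped with later ones; without that, the warp check may compare timestamps taken out of program order and report false TSC warps (or miss real ones). The pattern as a small sketch (the helper name is illustrative):

#include <asm/system.h>   /* rdtsc_barrier() */
#include <asm/tsc.h>      /* get_cycles(), cycles_t */

static cycles_t demo_serialized_tsc_read(void)
{
        cycles_t t;

        rdtsc_barrier();        /* keep RDTSC from executing early */
        t = get_cycles();
        rdtsc_barrier();        /* keep later work from overlapping the read */
        return t;
}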
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 8b6c393ab9fd..22fd6577156a 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -960,8 +960,6 @@ static inline int __init activate_vmi(void)
960 960
961void __init vmi_init(void) 961void __init vmi_init(void)
962{ 962{
963 unsigned long flags;
964
965 if (!vmi_rom) 963 if (!vmi_rom)
966 probe_vmi_rom(); 964 probe_vmi_rom();
967 else 965 else
@@ -973,13 +971,21 @@ void __init vmi_init(void)
973 971
974 reserve_top_address(-vmi_rom->virtual_top); 972 reserve_top_address(-vmi_rom->virtual_top);
975 973
976 local_irq_save(flags);
977 activate_vmi();
978
979#ifdef CONFIG_X86_IO_APIC 974#ifdef CONFIG_X86_IO_APIC
980 /* This is virtual hardware; timer routing is wired correctly */ 975 /* This is virtual hardware; timer routing is wired correctly */
981 no_timer_check = 1; 976 no_timer_check = 1;
982#endif 977#endif
978}
979
980void vmi_activate(void)
981{
982 unsigned long flags;
983
984 if (!vmi_rom)
985 return;
986
987 local_irq_save(flags);
988 activate_vmi();
983 local_irq_restore(flags & X86_EFLAGS_IF); 989 local_irq_restore(flags & X86_EFLAGS_IF);
984} 990}
985 991
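The vmi_32.c change splits the old combined setup into vmi_init(), which only probes the ROM and reserves the top of the fixmap (and is now called at the very start of setup_arch(), before early_ioremap_init()), and vmi_activate(), which does the actual paravirt patching later with interrupts disabled. A compressed sketch of the new activation entry point, with stand-in names for everything VMI-specific:

#include <linux/irqflags.h>
#include <asm/processor-flags.h>   /* X86_EFLAGS_IF */

static void *demo_rom;                     /* stands in for vmi_rom           */
static void demo_do_activate(void) { }     /* stub for the real activate step */

void demo_activate(void)
{
        unsigned long flags;

        if (!demo_rom)          /* early probe found nothing: plain hardware */
                return;

        local_irq_save(flags);
        demo_do_activate();
        local_irq_restore(flags & X86_EFLAGS_IF);   /* restore only the IF bit */
}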
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index b13acb75e822..15c3e6999182 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -310,7 +310,7 @@ static void __init setup_xstate_init(void)
310/* 310/*
311 * Enable and initialize the xsave feature. 311 * Enable and initialize the xsave feature.
312 */ 312 */
313void __init xsave_cntxt_init(void) 313void __ref xsave_cntxt_init(void)
314{ 314{
315 unsigned int eax, ebx, ecx, edx; 315 unsigned int eax, ebx, ecx, edx;
316 316
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ce3251ce5504..b81125f0bdee 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -20,6 +20,8 @@ if VIRTUALIZATION
20config KVM 20config KVM
21 tristate "Kernel-based Virtual Machine (KVM) support" 21 tristate "Kernel-based Virtual Machine (KVM) support"
22 depends on HAVE_KVM 22 depends on HAVE_KVM
23 # for device assignment:
24 depends on PCI
23 select PREEMPT_NOTIFIERS 25 select PREEMPT_NOTIFIERS
24 select MMU_NOTIFIER 26 select MMU_NOTIFIER
25 select ANON_INODES 27 select ANON_INODES
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 8772dc946823..59ebd37ad79e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -548,8 +548,10 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
548 mutex_lock(&kvm->lock); 548 mutex_lock(&kvm->lock);
549 pit->irq_source_id = kvm_request_irq_source_id(kvm); 549 pit->irq_source_id = kvm_request_irq_source_id(kvm);
550 mutex_unlock(&kvm->lock); 550 mutex_unlock(&kvm->lock);
551 if (pit->irq_source_id < 0) 551 if (pit->irq_source_id < 0) {
552 kfree(pit);
552 return NULL; 553 return NULL;
554 }
553 555
554 mutex_init(&pit->pit_state.lock); 556 mutex_init(&pit->pit_state.lock);
555 mutex_lock(&pit->pit_state.lock); 557 mutex_lock(&pit->pit_state.lock);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a5e64881d9b..410ddbc1aa2e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -314,7 +314,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
314 if (r) 314 if (r)
315 goto out; 315 goto out;
316 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, 316 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
317 rmap_desc_cache, 1); 317 rmap_desc_cache, 4);
318 if (r) 318 if (r)
319 goto out; 319 goto out;
320 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); 320 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
@@ -1038,13 +1038,13 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1038 } 1038 }
1039 1039
1040 rmap_write_protect(vcpu->kvm, sp->gfn); 1040 rmap_write_protect(vcpu->kvm, sp->gfn);
1041 kvm_unlink_unsync_page(vcpu->kvm, sp);
1041 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { 1042 if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1042 kvm_mmu_zap_page(vcpu->kvm, sp); 1043 kvm_mmu_zap_page(vcpu->kvm, sp);
1043 return 1; 1044 return 1;
1044 } 1045 }
1045 1046
1046 kvm_mmu_flush_tlb(vcpu); 1047 kvm_mmu_flush_tlb(vcpu);
1047 kvm_unlink_unsync_page(vcpu->kvm, sp);
1048 return 0; 1048 return 0;
1049} 1049}
1050 1050
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 613ec9aa674a..84eee43bbe74 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -331,6 +331,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
331 r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2], 331 r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
332 &curr_pte, sizeof(curr_pte)); 332 &curr_pte, sizeof(curr_pte));
333 if (r || curr_pte != gw->ptes[level - 2]) { 333 if (r || curr_pte != gw->ptes[level - 2]) {
334 kvm_mmu_put_page(shadow_page, sptep);
334 kvm_release_pfn_clean(sw->pfn); 335 kvm_release_pfn_clean(sw->pfn);
335 sw->sptep = NULL; 336 sw->sptep = NULL;
336 return 1; 337 return 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2643b430d83a..a4018b01e1f9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3149,7 +3149,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
3149 3149
3150 if (cpu_has_virtual_nmis()) { 3150 if (cpu_has_virtual_nmis()) {
3151 if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { 3151 if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
3152 if (vmx_nmi_enabled(vcpu)) { 3152 if (vcpu->arch.interrupt.pending) {
3153 enable_nmi_window(vcpu);
3154 } else if (vmx_nmi_enabled(vcpu)) {
3153 vcpu->arch.nmi_pending = false; 3155 vcpu->arch.nmi_pending = false;
3154 vcpu->arch.nmi_injected = true; 3156 vcpu->arch.nmi_injected = true;
3155 } else { 3157 } else {
@@ -3564,7 +3566,8 @@ static int __init vmx_init(void)
3564 bypass_guest_pf = 0; 3566 bypass_guest_pf = 0;
3565 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | 3567 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
3566 VMX_EPT_WRITABLE_MASK | 3568 VMX_EPT_WRITABLE_MASK |
3567 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); 3569 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
3570 VMX_EPT_IGMT_BIT);
3568 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, 3571 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
3569 VMX_EPT_EXECUTABLE_MASK); 3572 VMX_EPT_EXECUTABLE_MASK);
3570 kvm_enable_tdp(); 3573 kvm_enable_tdp();
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index 3e010d21fdd7..ec5edc339da6 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -352,6 +352,7 @@ enum vmcs_field {
352#define VMX_EPT_READABLE_MASK 0x1ull 352#define VMX_EPT_READABLE_MASK 0x1ull
353#define VMX_EPT_WRITABLE_MASK 0x2ull 353#define VMX_EPT_WRITABLE_MASK 0x2ull
354#define VMX_EPT_EXECUTABLE_MASK 0x4ull 354#define VMX_EPT_EXECUTABLE_MASK 0x4ull
355#define VMX_EPT_IGMT_BIT (1ull << 6)
355 356
356#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul 357#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
357 358
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 0e331652681e..52145007bd7e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -7,6 +7,7 @@
7 * This file provides all the same external entries as smp.c but uses 7 * This file provides all the same external entries as smp.c but uses
8 * the voyager hal to provide the functionality 8 * the voyager hal to provide the functionality
9 */ 9 */
10#include <linux/cpu.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/mm.h> 12#include <linux/mm.h>
12#include <linux/kernel_stat.h> 13#include <linux/kernel_stat.h>
@@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void)
1790 x86_write_percpu(cpu_number, hard_smp_processor_id()); 1791 x86_write_percpu(cpu_number, hard_smp_processor_id());
1791} 1792}
1792 1793
1794static void voyager_send_call_func(cpumask_t callmask)
1795{
1796 __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
1797 send_CPI(mask, VIC_CALL_FUNCTION_CPI);
1798}
1799
1800static void voyager_send_call_func_single(int cpu)
1801{
1802 send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
1803}
1804
1793struct smp_ops smp_ops = { 1805struct smp_ops smp_ops = {
1794 .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu, 1806 .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
1795 .smp_prepare_cpus = voyager_smp_prepare_cpus, 1807 .smp_prepare_cpus = voyager_smp_prepare_cpus,
@@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = {
1799 .smp_send_stop = voyager_smp_send_stop, 1811 .smp_send_stop = voyager_smp_send_stop,
1800 .smp_send_reschedule = voyager_smp_send_reschedule, 1812 .smp_send_reschedule = voyager_smp_send_reschedule,
1801 1813
1802 .send_call_func_ipi = native_send_call_func_ipi, 1814 .send_call_func_ipi = voyager_send_call_func,
1803 .send_call_func_single_ipi = native_send_call_func_single_ipi, 1815 .send_call_func_single_ipi = voyager_send_call_func_single,
1804}; 1816};
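The voyager_smp.c hunks replace the native (APIC-based) call-function IPI hooks in smp_ops with Voyager-specific ones that deliver cross-calls as VIC CPIs; the multi-CPU variant masks out the sending CPU before firing. The mask handling in miniature (toy types, not the Voyager HAL):

/* Take the low word of the target mask and clear the caller's own bit,
 * since a CPU never sends the call-function interrupt to itself. */
static unsigned int demo_call_mask(unsigned int callmask, int self_cpu)
{
        return callmask & ~(1u << self_cpu);
}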
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 847c164725f4..8518c678d83f 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -222,6 +222,41 @@ static void __init remap_numa_kva(void)
222 } 222 }
223} 223}
224 224
225#ifdef CONFIG_HIBERNATION
226/**
227 * resume_map_numa_kva - add KVA mapping to the temporary page tables created
228 * during resume from hibernation
229 * @pgd_base - temporary resume page directory
230 */
231void resume_map_numa_kva(pgd_t *pgd_base)
232{
233 int node;
234
235 for_each_online_node(node) {
236 unsigned long start_va, start_pfn, size, pfn;
237
238 start_va = (unsigned long)node_remap_start_vaddr[node];
239 start_pfn = node_remap_start_pfn[node];
240 size = node_remap_size[node];
241
242 printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
243
244 for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
245 unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
246 pgd_t *pgd = pgd_base + pgd_index(vaddr);
247 pud_t *pud = pud_offset(pgd, vaddr);
248 pmd_t *pmd = pmd_offset(pud, vaddr);
249
250 set_pmd(pmd, pfn_pmd(start_pfn + pfn,
251 PAGE_KERNEL_LARGE_EXEC));
252
253 printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
254 __FUNCTION__, vaddr, start_pfn + pfn);
255 }
256 }
257}
258#endif
259
225static unsigned long calculate_numa_remap_pages(void) 260static unsigned long calculate_numa_remap_pages(void)
226{ 261{
227 int nid; 262 int nid;
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 022cd41ea9b4..202864ad49a7 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -401,14 +401,13 @@ static int __init ppro_init(char **cpu_type)
401 *cpu_type = "i386/pii"; 401 *cpu_type = "i386/pii";
402 break; 402 break;
403 case 6 ... 8: 403 case 6 ... 8:
404 case 10 ... 11:
404 *cpu_type = "i386/piii"; 405 *cpu_type = "i386/piii";
405 break; 406 break;
406 case 9: 407 case 9:
408 case 13:
407 *cpu_type = "i386/p6_mobile"; 409 *cpu_type = "i386/p6_mobile";
408 break; 410 break;
409 case 10 ... 13:
410 *cpu_type = "i386/p6";
411 break;
412 case 14: 411 case 14:
413 *cpu_type = "i386/core"; 412 *cpu_type = "i386/core";
414 break; 413 break;
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 3f1b81a83e2e..e9f80c744cf3 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -69,7 +69,7 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
69 int i; 69 int i;
70 70
71 if (!reset_value) { 71 if (!reset_value) {
72 reset_value = kmalloc(sizeof(unsigned) * num_counters, 72 reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
73 GFP_ATOMIC); 73 GFP_ATOMIC);
74 if (!reset_value) 74 if (!reset_value)
75 return; 75 return;
@@ -156,6 +156,8 @@ static void ppro_start(struct op_msrs const * const msrs)
156 unsigned int low, high; 156 unsigned int low, high;
157 int i; 157 int i;
158 158
159 if (!reset_value)
160 return;
159 for (i = 0; i < num_counters; ++i) { 161 for (i = 0; i < num_counters; ++i) {
160 if (reset_value[i]) { 162 if (reset_value[i]) {
161 CTRL_READ(low, high, msrs, i); 163 CTRL_READ(low, high, msrs, i);
@@ -171,6 +173,8 @@ static void ppro_stop(struct op_msrs const * const msrs)
171 unsigned int low, high; 173 unsigned int low, high;
172 int i; 174 int i;
173 175
176 if (!reset_value)
177 return;
174 for (i = 0; i < num_counters; ++i) { 178 for (i = 0; i < num_counters; ++i) {
175 if (!reset_value[i]) 179 if (!reset_value[i])
176 continue; 180 continue;
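ppro_setup_ctrs() allocates reset_value lazily with GFP_ATOMIC and may fail; ppro_start() and ppro_stop() are therefore guarded so they do nothing when the array is missing, instead of dereferencing NULL. The allocate-once, bail-if-missing pattern in outline (names are illustrative):

#include <linux/slab.h>

#define DEMO_NUM_COUNTERS 2
static unsigned *demo_reset_value;      /* lazily allocated per-counter values */

static void demo_setup_ctrs(void)
{
        if (!demo_reset_value)
                demo_reset_value = kmalloc(sizeof(demo_reset_value[0]) *
                                           DEMO_NUM_COUNTERS, GFP_ATOMIC);
        /* allocation may fail; every later entry point must tolerate NULL */
}

static void demo_start(void)
{
        int i;

        if (!demo_reset_value)          /* setup failed: nothing to start */
                return;
        for (i = 0; i < DEMO_NUM_COUNTERS; i++) {
                if (!demo_reset_value[i])
                        continue;
                /* program and enable counter i here */
        }
}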
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 3c27a809393b..2051dc96b8e9 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -496,21 +496,24 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
496 pci_siemens_interrupt_controller); 496 pci_siemens_interrupt_controller);
497 497
498/* 498/*
499 * Regular PCI devices have 256 bytes, but AMD Family 10h Opteron ext config 499 * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have
500 * have 4096 bytes. Even if the device is capable, that doesn't mean we can 500 * 4096 bytes configuration space for each function of their processor
501 * access it. Maybe we don't have a way to generate extended config space 501 * configuration space.
502 * accesses. So check it
503 */ 502 */
504static void fam10h_pci_cfg_space_size(struct pci_dev *dev) 503static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev)
505{ 504{
506 dev->cfg_size = pci_cfg_space_size_ext(dev); 505 dev->cfg_size = pci_cfg_space_size_ext(dev);
507} 506}
508 507DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size);
509DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, fam10h_pci_cfg_space_size); 508DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size);
510DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, fam10h_pci_cfg_space_size); 509DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size);
511DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, fam10h_pci_cfg_space_size); 510DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size);
512DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, fam10h_pci_cfg_space_size); 511DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size);
513DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, fam10h_pci_cfg_space_size); 512DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size);
513DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size);
514DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size);
515DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size);
516DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size);
514 517
515/* 518/*
516 * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from 519 * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from
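The fixup.c hunk renames the helper (it is no longer Family 10h only) and extends the list of northbridge function IDs to 0x1300-0x1304, which are understood to be the Family 11h counterparts of the 0x1200-0x1204 devices, so they too get their 4 KiB extended config space size probed. Registering the same fixup for an additional device ID is a one-liner; a hypothetical example (0xdead is a made-up ID):

#include <linux/pci.h>

static void demo_cfg_space_size(struct pci_dev *dev)
{
        dev->cfg_size = pci_cfg_space_size_ext(dev);   /* probe for 4 KiB space */
}
/* Hypothetical ID only -- the patch itself covers 0x1200..0x1204 and
 * 0x1300..0x1304. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0xdead, demo_cfg_space_size);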
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index f2b6e3f11bfc..81197c62d5b3 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -12,6 +12,7 @@
12#include <asm/system.h> 12#include <asm/system.h>
13#include <asm/page.h> 13#include <asm/page.h>
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/mmzone.h>
15 16
16/* Defined in hibernate_asm_32.S */ 17/* Defined in hibernate_asm_32.S */
17extern int restore_image(void); 18extern int restore_image(void);
@@ -127,6 +128,9 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
127 } 128 }
128 } 129 }
129 } 130 }
131
132 resume_map_numa_kva(pgd_base);
133
130 return 0; 134 return 0;
131} 135}
132 136
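resume_physical_mapping_init() now also re-creates the NUMA KVA remap area in the temporary resume page tables via resume_map_numa_kva() (defined in the numa_32.c hunk above). On non-NUMA builds the call presumably compiles away through an empty stub; judging from the mmzone_32.h entry in the diffstat, that stub is roughly:

#ifndef CONFIG_NUMA
static inline void resume_map_numa_kva(pgd_t *pgd) { }   /* assumed stub */
#endif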
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 688936044dc9..636ef4caa52d 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -661,12 +661,11 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
661 * For 64-bit, we must skip the Xen hole in the middle of the address 661 * For 64-bit, we must skip the Xen hole in the middle of the address
662 * space, just after the big x86-64 virtual hole. 662 * space, just after the big x86-64 virtual hole.
663 */ 663 */
664static int xen_pgd_walk(struct mm_struct *mm, 664static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
665 int (*func)(struct mm_struct *mm, struct page *, 665 int (*func)(struct mm_struct *mm, struct page *,
666 enum pt_level), 666 enum pt_level),
667 unsigned long limit) 667 unsigned long limit)
668{ 668{
669 pgd_t *pgd = mm->pgd;
670 int flush = 0; 669 int flush = 0;
671 unsigned hole_low, hole_high; 670 unsigned hole_low, hole_high;
672 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit; 671 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
@@ -753,6 +752,14 @@ out:
753 return flush; 752 return flush;
754} 753}
755 754
755static int xen_pgd_walk(struct mm_struct *mm,
756 int (*func)(struct mm_struct *mm, struct page *,
757 enum pt_level),
758 unsigned long limit)
759{
760 return __xen_pgd_walk(mm, mm->pgd, func, limit);
761}
762
756/* If we're using split pte locks, then take the page's lock and 763/* If we're using split pte locks, then take the page's lock and
757 return a pointer to it. Otherwise return NULL. */ 764 return a pointer to it. Otherwise return NULL. */
758static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) 765static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
@@ -854,7 +861,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
854 861
855 xen_mc_batch(); 862 xen_mc_batch();
856 863
857 if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) { 864 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
858 /* re-enable interrupts for flushing */ 865 /* re-enable interrupts for flushing */
859 xen_mc_issue(0); 866 xen_mc_issue(0);
860 867
@@ -998,7 +1005,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
998 PT_PMD); 1005 PT_PMD);
999#endif 1006#endif
1000 1007
1001 xen_pgd_walk(mm, xen_unpin_page, USER_LIMIT); 1008 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
1002 1009
1003 xen_mc_issue(0); 1010 xen_mc_issue(0);
1004} 1011}
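__xen_pgd_walk() now takes the pgd to walk as an explicit argument instead of always using mm->pgd, which lets __xen_pgd_pin() and __xen_pgd_unpin() operate on a pgd that is not the one currently installed in the mm; the old xen_pgd_walk() signature survives as a thin wrapper. Pulling a parameter out of a worker while keeping the original entry point is a common refactoring; in miniature (types and names are illustrative, not the Xen code):

struct demo_mm { unsigned long *pgd; };

static int demo_walk_pgd(struct demo_mm *mm, unsigned long *pgd,
                         int (*func)(struct demo_mm *mm, unsigned long entry))
{
        int flush = 0;
        int i;

        for (i = 0; i < 4; i++)                 /* walk a toy 4-entry table */
                flush |= func(mm, pgd[i]);
        return flush;
}

static int demo_walk(struct demo_mm *mm,
                     int (*func)(struct demo_mm *mm, unsigned long entry))
{
        return demo_walk_pgd(mm, mm->pgd, func);   /* old behaviour preserved */
}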
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d77da613b1d2..acd9b6705e02 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -362,7 +362,7 @@ static void xen_cpu_die(unsigned int cpu)
362 alternatives_smp_switch(0); 362 alternatives_smp_switch(0);
363} 363}
364 364
365static void xen_play_dead(void) 365static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
366{ 366{
367 play_dead_common(); 367 play_dead_common();
368 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); 368 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index d7422dc2a55c..9e1afae8461f 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -49,7 +49,7 @@ bool xen_vcpu_stolen(int vcpu);
49 49
50void xen_mark_init_mm_pinned(void); 50void xen_mark_init_mm_pinned(void);
51 51
52void __init xen_setup_vcpu_info_placement(void); 52void xen_setup_vcpu_info_placement(void);
53 53
54#ifdef CONFIG_SMP 54#ifdef CONFIG_SMP
55void xen_smp_init(void); 55void xen_smp_init(void);