author     Ingo Molnar <mingo@elte.hu>    2010-06-18 04:53:12 -0400
committer  Ingo Molnar <mingo@elte.hu>    2010-06-18 04:53:19 -0400
commit     646b1db4956ba8bf748b835b5eba211133d91c2e (patch)
tree       061166d873d9da9cf83044a7593ad111787076c5 /arch/x86
parent     0f2c3de2ba110626515234d5d584fb1b0c0749a2 (diff)
parent     7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
Merge commit 'v2.6.35-rc3' into perf/core
Merge reason: Go from -rc1 base to -rc3 base, merge in fixes.
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/.gitignore                      |  3
-rw-r--r--  arch/x86/boot/compressed/mkpiggy.c       |  2
-rw-r--r--  arch/x86/boot/compressed/vmlinux.lds.S   |  4
-rw-r--r--  arch/x86/include/asm/cache.h             |  2
-rw-r--r--  arch/x86/include/asm/msr-index.h         |  1
-rw-r--r--  arch/x86/include/asm/pci_x86.h           |  2
-rw-r--r--  arch/x86/include/asm/suspend_32.h        |  2
-rw-r--r--  arch/x86/include/asm/suspend_64.h        |  2
-rw-r--r--  arch/x86/kernel/acpi/wakeup_32.S         |  2
-rw-r--r--  arch/x86/kernel/amd_iommu.c              | 16
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c         | 20
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c         | 10
-rw-r--r--  arch/x86/kernel/init_task.c              |  2
-rw-r--r--  arch/x86/kernel/setup_percpu.c           |  8
-rw-r--r--  arch/x86/kernel/smpboot.c                |  2
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S            |  4
-rw-r--r--  arch/x86/kvm/mmu.c                       |  5
-rw-r--r--  arch/x86/kvm/svm.c                       | 96
-rw-r--r--  arch/x86/mm/numa.c                       |  6
-rw-r--r--  arch/x86/pci/i386.c                      |  2
-rw-r--r--  arch/x86/pci/legacy.c                    | 42
-rw-r--r--  arch/x86/power/cpu.c                     |  4
-rw-r--r--  arch/x86/xen/suspend.c                   |  4
23 files changed, 189 insertions(+), 52 deletions(-)
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
new file mode 100644
index 000000000000..028079065af6
--- /dev/null
+++ b/arch/x86/.gitignore
@@ -0,0 +1,3 @@
+boot/compressed/vmlinux
+tools/test_get_len
+
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index bcbd36c41432..5c228129d175 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -77,7 +77,7 @@ int main(int argc, char *argv[])
         offs += 32*1024 + 18;   /* Add 32K + 18 bytes slack */
         offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
 
-        printf(".section \".rodata.compressed\",\"a\",@progbits\n");
+        printf(".section \".rodata..compressed\",\"a\",@progbits\n");
         printf(".globl z_input_len\n");
         printf("z_input_len = %lu\n", ilen);
         printf(".globl z_output_len\n");
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index a6f1a59a5b0c..5ddabceee124 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -26,8 +26,8 @@ SECTIONS
                 HEAD_TEXT
                 _ehead = . ;
         }
-        .rodata.compressed : {
-                *(.rodata.compressed)
+        .rodata..compressed : {
+                *(.rodata..compressed)
         }
         .text : {
                 _text = .;      /* Text */
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 2f9047cfaaca..48f99f15452e 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,7 +7,7 @@
 #define L1_CACHE_SHIFT  (CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
 #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
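For context, __read_mostly only changes which output section a variable is placed in; the rename above does not alter its use. A minimal usage sketch, with a hypothetical variable name that is not part of this patch:

#include <linux/cache.h>

/* Written once during init, read on every hot path; grouping such
 * variables in .data..read_mostly keeps them away from frequently
 * written data and avoids cacheline ping-pong. */
static unsigned long example_feature_mask __read_mostly;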
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b49d8ca228f6..8c7ae4318629 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -110,6 +110,7 @@
 #define MSR_AMD64_PATCH_LOADER          0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH        0xc0010140
 #define MSR_AMD64_OSVW_STATUS           0xc0010141
+#define MSR_AMD64_DC_CFG                0xc0011022
 #define MSR_AMD64_IBSFETCHCTL           0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD         0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD        0xc0011032
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 8d8797eae5d7..cd2a31dc5fb8 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -53,6 +53,8 @@ extern int pcibios_last_bus;
 extern struct pci_bus *pci_root_bus;
 extern struct pci_ops pci_root_ops;
 
+void pcibios_scan_specific_bus(int busn);
+
 /* pci-irq.c */
 
 struct irq_info {
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 48dcfa62ea07..fd921c3a6841 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -15,6 +15,8 @@ static inline int arch_prepare_suspend(void) { return 0; }
 struct saved_context {
         u16 es, fs, gs, ss;
         unsigned long cr0, cr2, cr3, cr4;
+        u64 misc_enable;
+        bool misc_enable_saved;
         struct desc_ptr gdt;
         struct desc_ptr idt;
         u16 ldt;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 06284f42b759..8d942afae681 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -27,6 +27,8 @@ struct saved_context {
         u16 ds, es, fs, gs, ss;
         unsigned long gs_base, gs_kernel_base, fs_base;
         unsigned long cr0, cr2, cr3, cr4, cr8;
+        u64 misc_enable;
+        bool misc_enable_saved;
         unsigned long efer;
         u16 gdt_pad;
         u16 gdt_limit;
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 8ded418b0593..13ab720573e3 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -1,4 +1,4 @@
-        .section .text.page_aligned
+        .section .text..page_aligned
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/page_types.h>
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index fa5a1474cd18..0d20286d78c6 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1487,6 +1487,7 @@ static int __attach_device(struct device *dev,
                            struct protection_domain *domain)
 {
         struct iommu_dev_data *dev_data, *alias_data;
+        int ret;
 
         dev_data = get_dev_data(dev);
         alias_data = get_dev_data(dev_data->alias);
@@ -1498,13 +1499,14 @@ static int __attach_device(struct device *dev,
         spin_lock(&domain->lock);
 
         /* Some sanity checks */
+        ret = -EBUSY;
         if (alias_data->domain != NULL &&
             alias_data->domain != domain)
-                return -EBUSY;
+                goto out_unlock;
 
         if (dev_data->domain != NULL &&
             dev_data->domain != domain)
-                return -EBUSY;
+                goto out_unlock;
 
         /* Do real assignment */
         if (dev_data->alias != dev) {
@@ -1520,10 +1522,14 @@ static int __attach_device(struct device *dev,
 
         atomic_inc(&dev_data->bind);
 
+        ret = 0;
+
+out_unlock:
+
         /* ready */
         spin_unlock(&domain->lock);
 
-        return 0;
+        return ret;
 }
 
 /*
@@ -2324,10 +2330,6 @@ int __init amd_iommu_init_dma_ops(void)
 
         iommu_detected = 1;
         swiotlb = 0;
-#ifdef CONFIG_GART_IOMMU
-        gart_iommu_aperture_disabled = 1;
-        gart_iommu_aperture = 0;
-#endif
 
         /* Make the driver finally visible to the drivers */
         dma_ops = &amd_iommu_dma_ops;
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 3bacb4d0844c..3cc63e2b8dd4 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -287,8 +287,12 @@ static u8 * __init iommu_map_mmio_space(u64 address)
 {
         u8 *ret;
 
-        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
+        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
+                pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
+                       address);
+                pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
                 return NULL;
+        }
 
         ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
         if (ret != NULL)
@@ -1314,7 +1318,7 @@ static int __init amd_iommu_init(void)
         ret = amd_iommu_init_dma_ops();
 
         if (ret)
-                goto free;
+                goto free_disable;
 
         amd_iommu_init_api();
 
@@ -1332,9 +1336,10 @@ static int __init amd_iommu_init(void)
 out:
         return ret;
 
-free:
+free_disable:
         disable_iommus();
 
+free:
         amd_iommu_uninit_devices();
 
         free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
@@ -1353,6 +1358,15 @@ free:
 
         free_unity_maps();
 
+#ifdef CONFIG_GART_IOMMU
+        /*
+         * We failed to initialize the AMD IOMMU - try fallback to GART
+         * if possible.
+         */
+        gart_iommu_init();
+
+#endif
+
         goto out;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 707165dbc203..18cc42562250 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -36,6 +36,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/debugfs.h>
+#include <linux/edac_mce.h>
 
 #include <asm/processor.h>
 #include <asm/hw_irq.h>
@@ -169,6 +170,15 @@ void mce_log(struct mce *mce)
         entry = rcu_dereference_check_mce(mcelog.next);
         for (;;) {
                 /*
+                 * If edac_mce is enabled, it will check the error type
+                 * and will process it, if it is a known error.
+                 * Otherwise, the error will be sent through mcelog
+                 * interface
+                 */
+                if (edac_mce_parse(mce))
+                        return;
+
+                /*
                  * When the buffer fills up discard new entries.
                  * Assume that the earlier errors are the more
                  * interesting ones:
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3a54dcb9cd0e..43e9ccf44947 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index a867940a6dfc..de3b63ae3da2 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -21,12 +21,6 @@
 #include <asm/cpu.h>
 #include <asm/stackprotector.h>
 
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-# define DBG(fmt, ...) pr_dbg(fmt, ##__VA_ARGS__)
-#else
-# define DBG(fmt, ...) do { if (0) pr_dbg(fmt, ##__VA_ARGS__); } while (0)
-#endif
-
 DEFINE_PER_CPU(int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
 
@@ -247,7 +241,7 @@ void __init setup_per_cpu_areas(void)
 #endif
 #endif
         /*
-         * Up to this point, the boot CPU has been using .data.init
+         * Up to this point, the boot CPU has been using .init.data
          * area.  Reload any changed state for the boot CPU.
          */
         if (cpu == boot_cpu_id)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 37462f1ddba5..c4f33b2e77d6 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -686,7 +686,7 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
 static void __cpuinit announce_cpu(int cpu, int apicid)
 {
         static int current_node = -1;
-        int node = cpu_to_node(cpu);
+        int node = early_cpu_to_node(cpu);
 
         if (system_state == SYSTEM_BOOTING) {
                 if (node != current_node) {
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 2cc249718c46..d0bb52296fa3 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -97,7 +97,7 @@ SECTIONS
                 HEAD_TEXT
 #ifdef CONFIG_X86_32
                 . = ALIGN(PAGE_SIZE);
-                *(.text.page_aligned)
+                *(.text..page_aligned)
 #endif
                 . = ALIGN(8);
                 _stext = .;
@@ -305,7 +305,7 @@ SECTIONS
         . = ALIGN(PAGE_SIZE);
         .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                 __bss_start = .;
-                *(.bss.page_aligned)
+                *(.bss..page_aligned)
                 *(.bss)
                 . = ALIGN(4);
                 __bss_stop = .;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 81563e76e28f..a6f695d76928 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1815,6 +1815,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
                 spte |= PT_WRITABLE_MASK;
 
+                if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
+                        spte &= ~PT_USER_MASK;
+
                 /*
                  * Optimization: for pte sync, if spte was writable the hash
                  * lookup is unnecessary (and expensive). Write protection
@@ -1870,6 +1873,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
                         child = page_header(pte & PT64_BASE_ADDR_MASK);
                         mmu_page_remove_parent_pte(child, sptep);
+                        __set_spte(sptep, shadow_trap_nonpresent_pte);
+                        kvm_flush_remote_tlbs(vcpu->kvm);
                 } else if (pfn != spte_to_pfn(*sptep)) {
                         pgprintk("hfn old %lx new %lx\n",
                                  spte_to_pfn(*sptep), pfn);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 96dc232bfc56..ce438e0fdd26 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -28,6 +28,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 
+#include <asm/tlbflush.h>
 #include <asm/desc.h>
 
 #include <asm/virtext.h>
@@ -56,6 +57,8 @@ MODULE_LICENSE("GPL");
 
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
+static bool erratum_383_found __read_mostly;
+
 static const u32 host_save_user_msrs[] = {
 #ifdef CONFIG_X86_64
         MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
@@ -374,6 +377,31 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
         svm->vmcb->control.event_inj_err = error_code;
 }
 
+static void svm_init_erratum_383(void)
+{
+        u32 low, high;
+        int err;
+        u64 val;
+
+        /* Only Fam10h is affected */
+        if (boot_cpu_data.x86 != 0x10)
+                return;
+
+        /* Use _safe variants to not break nested virtualization */
+        val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
+        if (err)
+                return;
+
+        val |= (1ULL << 47);
+
+        low  = lower_32_bits(val);
+        high = upper_32_bits(val);
+
+        native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
+
+        erratum_383_found = true;
+}
+
 static int has_svm(void)
 {
         const char *msg;
@@ -429,6 +457,8 @@ static int svm_hardware_enable(void *garbage)
 
         wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
 
+        svm_init_erratum_383();
+
         return 0;
 }
 
@@ -1410,8 +1440,59 @@ static int nm_interception(struct vcpu_svm *svm)
         return 1;
 }
 
-static int mc_interception(struct vcpu_svm *svm)
+static bool is_erratum_383(void)
 {
+        int err, i;
+        u64 value;
+
+        if (!erratum_383_found)
+                return false;
+
+        value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
+        if (err)
+                return false;
+
+        /* Bit 62 may or may not be set for this mce */
+        value &= ~(1ULL << 62);
+
+        if (value != 0xb600000000010015ULL)
+                return false;
+
+        /* Clear MCi_STATUS registers */
+        for (i = 0; i < 6; ++i)
+                native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
+
+        value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
+        if (!err) {
+                u32 low, high;
+
+                value &= ~(1ULL << 2);
+                low    = lower_32_bits(value);
+                high   = upper_32_bits(value);
+
+                native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
+        }
+
+        /* Flush tlb to evict multi-match entries */
+        __flush_tlb_all();
+
+        return true;
+}
+
+static void svm_handle_mce(struct vcpu_svm *svm)
+{
+        if (is_erratum_383()) {
+                /*
+                 * Erratum 383 triggered. Guest state is corrupt so kill the
+                 * guest.
+                 */
+                pr_err("KVM: Guest triggered AMD Erratum 383\n");
+
+                set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);
+
+                return;
+        }
+
         /*
          * On an #MC intercept the MCE handler is not called automatically in
          * the host. So do it by hand here.
@@ -1420,6 +1501,11 @@ static int mc_interception(struct vcpu_svm *svm)
1420 "int $0x12\n"); 1501 "int $0x12\n");
1421 /* not sure if we ever come back to this point */ 1502 /* not sure if we ever come back to this point */
1422 1503
1504 return;
1505}
1506
1507static int mc_interception(struct vcpu_svm *svm)
1508{
1423 return 1; 1509 return 1;
1424} 1510}
1425 1511
@@ -3088,6 +3174,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
                 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
                 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
         }
+
+        /*
+         * We need to handle MC intercepts here before the vcpu has a chance to
+         * change the physical cpu
+         */
+        if (unlikely(svm->vmcb->control.exit_code ==
+                     SVM_EXIT_EXCP_BASE + MC_VECTOR))
+                svm_handle_mce(svm);
 }
 
 #undef R
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 550df481accd..787c52ca49c3 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -3,12 +3,6 @@
 #include <linux/module.h>
 #include <linux/bootmem.h>
 
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-# define DBG(x...) printk(KERN_DEBUG x)
-#else
-# define DBG(x...)
-#endif
-
 /*
  * Which logical CPUs are on which nodes
  */
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 97da2ba9344b..6fdb3ec30c31 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -96,6 +96,7 @@ EXPORT_SYMBOL(pcibios_align_resource);
  *        the fact the PCI specs explicitly allow address decoders to be
  *        shared between expansion ROMs and other resource regions, it's
  *        at least dangerous)
+ *      - bad resource sizes or overlaps with other regions
  *
  * Our solution:
  *      (1) Allocate resources for all buses behind PCI-to-PCI bridges.
@@ -136,6 +137,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
                                  * child resource allocations in this
                                  * range.
                                  */
+                                r->start = r->end = 0;
                                 r->flags = 0;
                         }
                 }
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c
index 0db5eaf54560..8d460eaf524f 100644
--- a/arch/x86/pci/legacy.c
+++ b/arch/x86/pci/legacy.c
@@ -11,28 +11,14 @@
  */
 static void __devinit pcibios_fixup_peer_bridges(void)
 {
-        int n, devfn;
-        long node;
+        int n;
 
         if (pcibios_last_bus <= 0 || pcibios_last_bus > 0xff)
                 return;
         DBG("PCI: Peer bridge fixup\n");
 
-        for (n=0; n <= pcibios_last_bus; n++) {
-                u32 l;
-                if (pci_find_bus(0, n))
-                        continue;
-                node = get_mp_bus_to_node(n);
-                for (devfn = 0; devfn < 256; devfn += 8) {
-                        if (!raw_pci_read(0, n, devfn, PCI_VENDOR_ID, 2, &l) &&
-                            l != 0x0000 && l != 0xffff) {
-                                DBG("Found device at %02x:%02x [%04x]\n", n, devfn, l);
-                                printk(KERN_INFO "PCI: Discovered peer bus %02x\n", n);
-                                pci_scan_bus_on_node(n, &pci_root_ops, node);
-                                break;
-                        }
-                }
-        }
+        for (n=0; n <= pcibios_last_bus; n++)
+                pcibios_scan_specific_bus(n);
 }
 
 int __init pci_legacy_init(void)
@@ -50,6 +36,28 @@ int __init pci_legacy_init(void)
         return 0;
 }
 
+void pcibios_scan_specific_bus(int busn)
+{
+        int devfn;
+        long node;
+        u32 l;
+
+        if (pci_find_bus(0, busn))
+                return;
+
+        node = get_mp_bus_to_node(busn);
+        for (devfn = 0; devfn < 256; devfn += 8) {
+                if (!raw_pci_read(0, busn, devfn, PCI_VENDOR_ID, 2, &l) &&
+                    l != 0x0000 && l != 0xffff) {
+                        DBG("Found device at %02x:%02x [%04x]\n", busn, devfn, l);
+                        printk(KERN_INFO "PCI: Discovered peer bus %02x\n", busn);
+                        pci_scan_bus_on_node(busn, &pci_root_ops, node);
+                        return;
+                }
+        }
+}
+EXPORT_SYMBOL_GPL(pcibios_scan_specific_bus);
+
 int __init pci_subsys_init(void)
 {
         /*
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0a979f3e5b8a..1290ba54b350 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -105,6 +105,8 @@ static void __save_processor_state(struct saved_context *ctxt)
         ctxt->cr4 = read_cr4();
         ctxt->cr8 = read_cr8();
 #endif
+        ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
+                                               &ctxt->misc_enable);
 }
 
 /* Needed by apm.c */
@@ -152,6 +154,8 @@ static void fix_processor_context(void)
  */
 static void __restore_processor_state(struct saved_context *ctxt)
 {
+        if (ctxt->misc_enable_saved)
+                wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
         /*
          * control registers
          */
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 987267f79bf5..a9c661108034 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -60,6 +60,6 @@ static void xen_vcpu_notify_restore(void *data)
 
 void xen_arch_resume(void)
 {
-        smp_call_function(xen_vcpu_notify_restore,
-                          (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
+        on_each_cpu(xen_vcpu_notify_restore,
+                    (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
 }
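For reference, the fix above relies on on_each_cpu() also invoking the callback on the CPU that initiates the call, which smp_call_function() deliberately skips. A minimal sketch of the pattern, with a hypothetical callback that is not part of this patch:

#include <linux/kernel.h>
#include <linux/smp.h>

static void example_notify(void *info)
{
        /* Runs on every online CPU, including the calling one. */
        pr_info("resume notification on CPU %d\n", smp_processor_id());
}

static void example_resume(void)
{
        /* Last argument 1: wait until the callback has run on all CPUs. */
        on_each_cpu(example_notify, NULL, 1);
}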