Diffstat (limited to 'arch/x86')

 -rw-r--r--   arch/x86/include/asm/msr-index.h  |  1
 -rw-r--r--   arch/x86/include/asm/suspend_32.h |  2
 -rw-r--r--   arch/x86/include/asm/suspend_64.h |  2
 -rw-r--r--   arch/x86/kvm/mmu.c                |  5
 -rw-r--r--   arch/x86/kvm/svm.c                | 96
 -rw-r--r--   arch/x86/pci/i386.c               |  2
 -rw-r--r--   arch/x86/power/cpu.c              |  4

 7 files changed, 111 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b49d8ca228f6..8c7ae4318629 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -110,6 +110,7 @@
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
+#define MSR_AMD64_DC_CFG		0xc0011022
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 48dcfa62ea07..fd921c3a6841 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -15,6 +15,8 @@ static inline int arch_prepare_suspend(void) { return 0; }
 struct saved_context {
 	u16 es, fs, gs, ss;
 	unsigned long cr0, cr2, cr3, cr4;
+	u64 misc_enable;
+	bool misc_enable_saved;
 	struct desc_ptr gdt;
 	struct desc_ptr idt;
 	u16 ldt;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 06284f42b759..8d942afae681 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -27,6 +27,8 @@ struct saved_context {
 	u16 ds, es, fs, gs, ss;
 	unsigned long gs_base, gs_kernel_base, fs_base;
 	unsigned long cr0, cr2, cr3, cr4, cr8;
+	u64 misc_enable;
+	bool misc_enable_saved;
 	unsigned long efer;
 	u16 gdt_pad;
 	u16 gdt_limit;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 81563e76e28f..a6f695d76928 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1815,6 +1815,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 		spte |= PT_WRITABLE_MASK;
 
+		if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
+			spte &= ~PT_USER_MASK;
+
 		/*
 		 * Optimization: for pte sync, if spte was writable the hash
 		 * lookup is unnecessary (and expensive). Write protection
@@ -1870,6 +1873,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
 			mmu_page_remove_parent_pte(child, sptep);
+			__set_spte(sptep, shadow_trap_nonpresent_pte);
+			kvm_flush_remote_tlbs(vcpu->kvm);
 		} else if (pfn != spte_to_pfn(*sptep)) {
 			pgprintk("hfn old %lx new %lx\n",
 				 spte_to_pfn(*sptep), pfn);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 96dc232bfc56..ce438e0fdd26 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -28,6 +28,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 
+#include <asm/tlbflush.h>
 #include <asm/desc.h>
 
 #include <asm/virtext.h>
@@ -56,6 +57,8 @@ MODULE_LICENSE("GPL");
 
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
+static bool erratum_383_found __read_mostly;
+
 static const u32 host_save_user_msrs[] = {
 #ifdef CONFIG_X86_64
 	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
@@ -374,6 +377,31 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 	svm->vmcb->control.event_inj_err = error_code;
 }
 
+static void svm_init_erratum_383(void)
+{
+	u32 low, high;
+	int err;
+	u64 val;
+
+	/* Only Fam10h is affected */
+	if (boot_cpu_data.x86 != 0x10)
+		return;
+
+	/* Use _safe variants to not break nested virtualization */
+	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
+	if (err)
+		return;
+
+	val |= (1ULL << 47);
+
+	low  = lower_32_bits(val);
+	high = upper_32_bits(val);
+
+	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
+
+	erratum_383_found = true;
+}
+
 static int has_svm(void)
 {
 	const char *msg;
@@ -429,6 +457,8 @@ static int svm_hardware_enable(void *garbage)
 
 	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
 
+	svm_init_erratum_383();
+
 	return 0;
 }
 
@@ -1410,8 +1440,59 @@ static int nm_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
-static int mc_interception(struct vcpu_svm *svm)
+static bool is_erratum_383(void)
 {
+	int err, i;
+	u64 value;
+
+	if (!erratum_383_found)
+		return false;
+
+	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
+	if (err)
+		return false;
+
+	/* Bit 62 may or may not be set for this mce */
+	value &= ~(1ULL << 62);
+
+	if (value != 0xb600000000010015ULL)
+		return false;
+
+	/* Clear MCi_STATUS registers */
+	for (i = 0; i < 6; ++i)
+		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
+
+	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
+	if (!err) {
+		u32 low, high;
+
+		value &= ~(1ULL << 2);
+		low    = lower_32_bits(value);
+		high   = upper_32_bits(value);
+
+		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
+	}
+
+	/* Flush tlb to evict multi-match entries */
+	__flush_tlb_all();
+
+	return true;
+}
+
+static void svm_handle_mce(struct vcpu_svm *svm)
+{
+	if (is_erratum_383()) {
+		/*
+		 * Erratum 383 triggered. Guest state is corrupt so kill the
+		 * guest.
+		 */
+		pr_err("KVM: Guest triggered AMD Erratum 383\n");
+
+		set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);
+
+		return;
+	}
+
 	/*
 	 * On an #MC intercept the MCE handler is not called automatically in
 	 * the host. So do it by hand here.
@@ -1420,6 +1501,11 @@ static int mc_interception(struct vcpu_svm *svm)
 		"int $0x12\n");
 	/* not sure if we ever come back to this point */
 
+	return;
+}
+
+static int mc_interception(struct vcpu_svm *svm)
+{
 	return 1;
 }
 
@@ -3088,6 +3174,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
 		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
 	}
+
+	/*
+	 * We need to handle MC intercepts here before the vcpu has a chance to
+	 * change the physical cpu
+	 */
+	if (unlikely(svm->vmcb->control.exit_code ==
+		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
+		svm_handle_mce(svm);
 }
 
 #undef R
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 97da2ba9344b..6fdb3ec30c31 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -96,6 +96,7 @@ EXPORT_SYMBOL(pcibios_align_resource);
  *	    the fact the PCI specs explicitly allow address decoders to be
  *	    shared between expansion ROMs and other resource regions, it's
  *	    at least dangerous)
+ *	- bad resource sizes or overlaps with other regions
  *
  *  Our solution:
  *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
@@ -136,6 +137,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 					 * child resource allocations in this
 					 * range.
 					 */
+					r->start = r->end = 0;
 					r->flags = 0;
 				}
 			}
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0a979f3e5b8a..1290ba54b350 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -105,6 +105,8 @@ static void __save_processor_state(struct saved_context *ctxt)
 	ctxt->cr4 = read_cr4();
 	ctxt->cr8 = read_cr8();
 #endif
+	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
+					       &ctxt->misc_enable);
 }
 
 /* Needed by apm.c */
@@ -152,6 +154,8 @@ static void fix_processor_context(void)
  */
 static void __restore_processor_state(struct saved_context *ctxt)
 {
+	if (ctxt->misc_enable_saved)
+		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
 	/*
 	 * control registers
 	 */