author		Linus Torvalds <torvalds@linux-foundation.org>	2010-06-10 13:53:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-06-10 13:53:14 -0400
commit		7908a9e5fc3f9a679b1777ed231a03636c068446 (patch)
tree		8d11c58a2b2550095f0945547e520062466b69f0 /arch
parent		8fade6aff706b2ae3f02864b4023d34b002cd226 (diff)
parent		07dc7263b99e4ddad2b4c69765a428ccb7d48938 (diff)
Merge branch 'kvm-updates/2.6.35' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.35' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: read apic->irr with ioapic lock held
  KVM: ia64: Add missing spin_unlock in kvm_arch_hardware_enable()
  KVM: Fix order passed to iommu_unmap
  KVM: MMU: Remove user access when allowing kernel access to gpte.w=0 page
  KVM: MMU: invalidate and flush on spte small->large page size change
  KVM: SVM: Implement workaround for Erratum 383
  KVM: SVM: Handle MCEs early in the vmexit process
  KVM: powerpc: fix init/exit annotation
Diffstat (limited to 'arch')
-rw-r--r--	arch/ia64/kvm/kvm-ia64.c		1
-rw-r--r--	arch/powerpc/kvm/e500.c			2
-rw-r--r--	arch/x86/include/asm/msr-index.h	1
-rw-r--r--	arch/x86/kvm/mmu.c			5
-rw-r--r--	arch/x86/kvm/svm.c			96
5 files changed, 103 insertions, 2 deletions
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d5f4e9161201..21b701374f72 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -144,6 +144,7 @@ int kvm_arch_hardware_enable(void *garbage)
 			VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
 			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
 	if (status != 0) {
+		spin_unlock(&vp_lock);
 		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
 		return -EINVAL;
 	}
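
The ia64 fix above is the classic missed unlock on an error path: kvm_arch_hardware_enable() takes vp_lock before calling into PAL to enable VT, but the failure branch returned with the lock still held. A minimal sketch of the corrected pattern follows; the helper and function names are illustrative stand-ins, not the real ia64 code:

	#include <linux/errno.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(vp_lock);

	/* Hypothetical stand-in for the PAL call that can fail. */
	static long enable_vt_support(void)
	{
		return 0;
	}

	static int example_hardware_enable(void)	/* hypothetical */
	{
		long status;

		spin_lock(&vp_lock);
		status = enable_vt_support();
		if (status != 0) {
			/* The fix: release the lock on the failure path too. */
			spin_unlock(&vp_lock);
			return -EINVAL;
		}
		/* ... remaining per-cpu setup ... */
		spin_unlock(&vp_lock);
		return 0;
	}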
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index bc2b4004eb26..e8a00b0c4449 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -164,7 +164,7 @@ static int __init kvmppc_e500_init(void)
 	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
 }
 
-static void __init kvmppc_e500_exit(void)
+static void __exit kvmppc_e500_exit(void)
 {
 	kvmppc_booke_exit();
 }
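
The powerpc change swaps a wrong __init annotation for __exit on the module's exit handler. Code marked __init lives in a section the kernel frees once initialization finishes, so an exit function annotated __init could be invoked after its text has been discarded; __exit is the matching annotation for teardown code. A minimal sketch of the correct pairing, assuming a generic module (names are illustrative):

	#include <linux/init.h>
	#include <linux/module.h>

	static int __init demo_init(void)	/* may be freed after module load */
	{
		return 0;
	}

	static void __exit demo_exit(void)	/* only referenced at unload */
	{
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");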
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b49d8ca228f6..8c7ae4318629 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -110,6 +110,7 @@
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
+#define MSR_AMD64_DC_CFG		0xc0011022
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 81563e76e28f..a6f695d76928 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1815,6 +1815,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 		spte |= PT_WRITABLE_MASK;
 
+		if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
+			spte &= ~PT_USER_MASK;
+
 		/*
 		 * Optimization: for pte sync, if spte was writable the hash
 		 * lookup is unnecessary (and expensive). Write protection
@@ -1870,6 +1873,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
 			mmu_page_remove_parent_pte(child, sptep);
+			__set_spte(sptep, shadow_trap_nonpresent_pte);
+			kvm_flush_remote_tlbs(vcpu->kvm);
 		} else if (pfn != spte_to_pfn(*sptep)) {
 			pgprintk("hfn old %lx new %lx\n",
 				 spte_to_pfn(*sptep), pfn);
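
Two shadow-MMU fixes land here. The first handles guests running with CR0.WP=0: KVM may make a shadow pte host-writable even though the guest pte has gpte.w=0 (so the guest kernel can write through it), but the user bit must then be dropped, or guest user mode would gain write access the guest never granted. The second clears the old spte and flushes remote TLBs when a mapping changes page size, so other vcpus cannot keep using the stale translation. A self-contained illustration of the first hunk's permission trimming, using the x86 pte bit positions but outside the kernel (plain C; the names mirror the kernel's but are redefined here):

	#include <stdint.h>
	#include <stdio.h>

	#define PT_WRITABLE_MASK (1ULL << 1)	/* x86 pte W bit */
	#define PT_USER_MASK     (1ULL << 2)	/* x86 pte U bit */
	#define ACC_WRITE_MASK   PT_WRITABLE_MASK

	int main(void)
	{
		uint64_t pte_access = 0;	/* guest pte has w=0 */
		int tdp_enabled = 0;		/* shadow paging in use */
		uint64_t spte = PT_WRITABLE_MASK | PT_USER_MASK;

		/* Host grants kernel-mode write (CR0.WP=0 case), but user
		 * mode must not inherit it through the user bit: */
		if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
			spte &= ~PT_USER_MASK;

		printf("user bit now: %d\n", !!(spte & PT_USER_MASK)); /* 0 */
		return 0;
	}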
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 96dc232bfc56..ce438e0fdd26 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -28,6 +28,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 
+#include <asm/tlbflush.h>
 #include <asm/desc.h>
 
 #include <asm/virtext.h>
@@ -56,6 +57,8 @@ MODULE_LICENSE("GPL");
 
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
+static bool erratum_383_found __read_mostly;
+
 static const u32 host_save_user_msrs[] = {
 #ifdef CONFIG_X86_64
 	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
@@ -374,6 +377,31 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 	svm->vmcb->control.event_inj_err = error_code;
 }
 
+static void svm_init_erratum_383(void)
+{
+	u32 low, high;
+	int err;
+	u64 val;
+
+	/* Only Fam10h is affected */
+	if (boot_cpu_data.x86 != 0x10)
+		return;
+
+	/* Use _safe variants to not break nested virtualization */
+	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
+	if (err)
+		return;
+
+	val |= (1ULL << 47);
+
+	low = lower_32_bits(val);
+	high = upper_32_bits(val);
+
+	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
+
+	erratum_383_found = true;
+}
+
 static int has_svm(void)
 {
 	const char *msg;
@@ -429,6 +457,8 @@ static int svm_hardware_enable(void *garbage)
 
 	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
 
+	svm_init_erratum_383();
+
 	return 0;
 }
 
@@ -1410,8 +1440,59 @@ static int nm_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
-static int mc_interception(struct vcpu_svm *svm)
+static bool is_erratum_383(void)
 {
+	int err, i;
+	u64 value;
+
+	if (!erratum_383_found)
+		return false;
+
+	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
+	if (err)
+		return false;
+
+	/* Bit 62 may or may not be set for this mce */
+	value &= ~(1ULL << 62);
+
+	if (value != 0xb600000000010015ULL)
+		return false;
+
+	/* Clear MCi_STATUS registers */
+	for (i = 0; i < 6; ++i)
+		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
+
+	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
+	if (!err) {
+		u32 low, high;
+
+		value &= ~(1ULL << 2);
+		low = lower_32_bits(value);
+		high = upper_32_bits(value);
+
+		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
+	}
+
+	/* Flush tlb to evict multi-match entries */
+	__flush_tlb_all();
+
+	return true;
+}
+
+static void svm_handle_mce(struct vcpu_svm *svm)
+{
+	if (is_erratum_383()) {
+		/*
+		 * Erratum 383 triggered. Guest state is corrupt so kill the
+		 * guest.
+		 */
+		pr_err("KVM: Guest triggered AMD Erratum 383\n");
+
+		set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);
+
+		return;
+	}
+
 	/*
 	 * On an #MC intercept the MCE handler is not called automatically in
 	 * the host. So do it by hand here.
@@ -1420,6 +1501,11 @@ static int mc_interception(struct vcpu_svm *svm)
1420 "int $0x12\n"); 1501 "int $0x12\n");
1421 /* not sure if we ever come back to this point */ 1502 /* not sure if we ever come back to this point */
1422 1503
1504 return;
1505}
1506
1507static int mc_interception(struct vcpu_svm *svm)
1508{
1423 return 1; 1509 return 1;
1424} 1510}
1425 1511
@@ -3088,6 +3174,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
 		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
 	}
+
+	/*
+	 * We need to handle MC intercepts here before the vcpu has a chance to
+	 * change the physical cpu
+	 */
+	if (unlikely(svm->vmcb->control.exit_code ==
+		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
+		svm_handle_mce(svm);
 }
 
 #undef R
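
Tying the SVM pieces together: hardware enable sets the workaround bit (bit 47 of the newly defined MSR_AMD64_DC_CFG) on Fam10h parts, and every #MC vmexit is screened in svm_vcpu_run(), while the task is still on the physical cpu that took the machine check, because the MCA status registers being inspected are per-cpu. Detection keys on a fixed MC0_STATUS signature, 0xb600000000010015, once the indeterminate bit 62 is masked off. A self-contained illustration of that signature check (plain C, outside the kernel; the constant is taken from the patch itself):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* MC0_STATUS value the patch treats as Erratum 383. */
	#define ERRATUM_383_SIG 0xb600000000010015ULL

	static bool looks_like_erratum_383(uint64_t mc0_status)
	{
		mc0_status &= ~(1ULL << 62);	/* bit 62 may or may not be set */
		return mc0_status == ERRATUM_383_SIG;
	}

	int main(void)
	{
		/* The signature must match with and without bit 62 set. */
		printf("%d\n", looks_like_erratum_383(ERRATUM_383_SIG));			/* 1 */
		printf("%d\n", looks_like_erratum_383(ERRATUM_383_SIG | (1ULL << 62)));	/* 1 */
		printf("%d\n", looks_like_erratum_383(0));					/* 0 */
		return 0;
	}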