Diffstat (limited to 'arch/x86/kvm')

 arch/x86/kvm/emulate.c | 30 +++++++++++++++++++++---------
 arch/x86/kvm/mmu.c     | 13 +++++++++----
 arch/x86/kvm/x86.c     |  5 ++++-
 3 files changed, 34 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 97d9a9914ba8..a3b57a27be88 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 	return address_mask(ctxt, reg);
 }
 
+static void masked_increment(ulong *reg, ulong mask, int inc)
+{
+	assign_masked(reg, *reg + inc, mask);
+}
+
 static inline void
 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
 {
+	ulong mask;
+
 	if (ctxt->ad_bytes == sizeof(unsigned long))
-		*reg += inc;
+		mask = ~0UL;
 	else
-		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+		mask = ad_mask(ctxt);
+	masked_increment(reg, mask, inc);
+}
+
+static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+{
+	masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
 }
 
 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
 {
 	struct segmented_address addr;
 
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
-	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+	rsp_increment(ctxt, -bytes);
+	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
 	addr.seg = VCPU_SREG_SS;
 
 	return segmented_write(ctxt, addr, data, bytes);
@@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	struct segmented_address addr;
 
-	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
 	addr.seg = VCPU_SREG_SS;
 	rc = segmented_read(ctxt, addr, dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
+	rsp_increment(ctxt, len);
 	return rc;
 }
 
@@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
 
 	while (reg >= VCPU_REGS_RAX) {
 		if (reg == VCPU_REGS_RSP) {
-			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
-						   ctxt->op_bytes);
+			rsp_increment(ctxt, ctxt->op_bytes);
 			--reg;
 		}
 
@@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
 	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+	rsp_increment(ctxt, ctxt->src.val);
 	return X86EMUL_CONTINUE;
 }
 
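Note on the new helpers: masked_increment() updates only the bits selected by mask and preserves the rest, which is exactly what a sub-64-bit stack pointer needs. A minimal standalone sketch, assuming assign_masked() keeps its existing emulate.c definition (bits outside 'mask' preserved, bits inside taken from the incremented value):

#include <stdio.h>

typedef unsigned long ulong;

/* Assumed to match the existing emulate.c helper. */
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

int main(void)
{
	ulong rsp = 0xdead0002UL;	/* stale bits 63:16, SP = 2 */

	/* 16-bit stack: only SP (the low 16 bits) may change on a push. */
	masked_increment(&rsp, 0xffffUL, -4);
	printf("%#lx\n", rsp);		/* 0xdeadfffe: SP wrapped, upper bits kept */
	return 0;
}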
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01ca00423938..7fbd0d273ea8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4113,16 +4113,21 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 		LIST_HEAD(invalid_list);
 
 		/*
+		 * Never scan more than sc->nr_to_scan VM instances.
+		 * Will not hit this condition practically since we do not try
+		 * to shrink more than one VM and it is very unlikely to see
+		 * !n_used_mmu_pages so many times.
+		 */
+		if (!nr_to_scan--)
+			break;
+		/*
 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
 		 * here. We may skip a VM instance errorneosly, but we do not
 		 * want to shrink a VM that only started to populate its MMU
 		 * anyway.
 		 */
-		if (kvm->arch.n_used_mmu_pages > 0) {
-			if (!nr_to_scan--)
-				break;
+		if (!kvm->arch.n_used_mmu_pages)
 			continue;
-		}
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
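The reworked loop charges the scan budget per VM visited, before the used-pages check, so one shrink request can never iterate over more than sc->nr_to_scan instances. A runnable userspace analogue of the new control flow (the names and the shrink-one-VM behavior are taken from the comment above; everything else is illustrative):

#include <stdio.h>

struct vm { int n_used_mmu_pages; };

static void shrink(struct vm *vms, int nvms, int nr_to_scan)
{
	for (int i = 0; i < nvms; i++) {
		if (!nr_to_scan--)		/* budget first: visit at most */
			break;			/* nr_to_scan instances        */
		if (!vms[i].n_used_mmu_pages)
			continue;		/* empty MMU, nothing to free  */
		printf("reclaim one page from VM %d\n", i);
		break;	/* like mmu_shrink(), shrink a single VM */
	}
}

int main(void)
{
	struct vm vms[] = { { 0 }, { 0 }, { 42 } };

	shrink(vms, 3, 1);	/* budget runs out before VM 2 is reached */
	shrink(vms, 3, 8);	/* reclaims from VM 2 */
	return 0;
}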
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bce48f6928..148ed666e311 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	9
+#define KVM_SAVE_MSRS_BEGIN	10
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
@@ -2000,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_KVM_STEAL_TIME:
 		data = vcpu->arch.st.msr_val;
 		break;
+	case MSR_KVM_PV_EOI_EN:
+		data = vcpu->arch.pv_eoi.msr_val;
+		break;
 	case MSR_IA32_P5_MC_ADDR:
 	case MSR_IA32_P5_MC_TYPE:
 	case MSR_IA32_MCG_CAP:
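With MSR_KVM_PV_EOI_EN now readable (and KVM_SAVE_MSRS_BEGIN bumped to 10 so it is enumerated among the kvm-specific MSRs), userspace can save and restore it across migration. A hypothetical sketch of the read side via the standard KVM_GET_MSRS vcpu ioctl; the MSR index is the value defined in asm/kvm_para.h, error handling omitted:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

#define MSR_KVM_PV_EOI_EN	0x4b564d04	/* asm/kvm_para.h */

static __u64 read_pv_eoi(int vcpu_fd)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entries[1];
	} msrs;

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 1;
	msrs.entries[0].index = MSR_KVM_PV_EOI_EN;
	ioctl(vcpu_fd, KVM_GET_MSRS, &msrs);	/* returns the number of MSRs read */
	return msrs.entries[0].data;
}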