author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-10 12:08:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-10 12:08:21 -0400
commit     c67723ebbb2d6f672a0e9e5b1a8d1a2442942557 (patch)
tree       3518799ec3b4e5a8529de39ca8342fde50907952 /arch
parent     ec6671589a07d9b27ff5832138ff435b3a3c9b09 (diff)
parent     326f578f7e1443bac2333712dd130a261ec15288 (diff)
Merge tag 'kvm-3.10-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Gleb Natapov:
"Most of the fixes are in the emulator since now we emulate more than
we did before for correctness sake we see more bugs there, but there
is also an OOPS fixed and corruption of xcr0 register."
* tag 'kvm-3.10-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: emulator: emulate SALC
KVM: emulator: emulate XLAT
KVM: emulator: emulate AAM
KVM: VMX: fix halt emulation while emulating invalid guest state
KVM: Fix kvm_irqfd_init initialization
KVM: x86: fix maintenance of guest/host xcr0 state
Diffstat (limited to 'arch')
 -rw-r--r--  arch/x86/kvm/emulate.c  42
 -rw-r--r--  arch/x86/kvm/vmx.c       6
 -rw-r--r--  arch/x86/kvm/x86.c      40
 3 files changed, 67 insertions, 21 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8e517bba6a7c..8db0010ed150 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -60,6 +60,7 @@
 #define OpGS 25ull /* GS */
 #define OpMem8 26ull /* 8-bit zero extended memory operand */
 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
+#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
 
 #define OpBits 5 /* Width of operand field */
 #define OpMask ((1ull << OpBits) - 1)
@@ -99,6 +100,7 @@
 #define SrcImmUByte (OpImmUByte << SrcShift)
 #define SrcImmU (OpImmU << SrcShift)
 #define SrcSI (OpSI << SrcShift)
+#define SrcXLat (OpXLat << SrcShift)
 #define SrcImmFAddr (OpImmFAddr << SrcShift)
 #define SrcMemFAddr (OpMemFAddr << SrcShift)
 #define SrcAcc (OpAcc << SrcShift)
@@ -533,6 +535,9 @@ FOP_SETCC(setle)
 FOP_SETCC(setnle)
 FOP_END;
 
+FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+FOP_END;
+
 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
 	do { \
 		unsigned long _tmp; \
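
For reference, SALC (opcode 0xD6, undocumented but implemented by real CPUs) sets AL to 0xFF when CF is set and to 0x00 otherwise, leaving EFLAGS untouched; the FOP_START(salc) fastop above does this with "sbb %al, %al" bracketed by pushf/popf so the flags are preserved. A minimal user-space sketch of the same semantics (plain C, illustrative only, not kernel code):

    #include <stdint.h>

    /* SALC reference behaviour: AL = CF ? 0xFF : 0x00, flags unchanged. */
    static uint8_t salc(int carry_flag)
    {
            return carry_flag ? 0xff : 0x00;
    }
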
@@ -2996,6 +3001,28 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
+static int em_aam(struct x86_emulate_ctxt *ctxt)
+{
+	u8 al, ah;
+
+	if (ctxt->src.val == 0)
+		return emulate_de(ctxt);
+
+	al = ctxt->dst.val & 0xff;
+	ah = al / ctxt->src.val;
+	al %= ctxt->src.val;
+
+	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
+
+	/* Set PF, ZF, SF */
+	ctxt->src.type = OP_IMM;
+	ctxt->src.val = 0;
+	ctxt->src.bytes = 1;
+	fastop(ctxt, em_or);
+
+	return X86EMUL_CONTINUE;
+}
+
 static int em_aad(struct x86_emulate_ctxt *ctxt)
 {
 	u8 al = ctxt->dst.val & 0xff;
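
em_aam() above follows the documented AAM behaviour: divide AL by the immediate operand (10 by default), store the quotient in AH and the remainder in AL, and raise #DE when the immediate is zero; PF, ZF and SF are then derived from the new AL by OR-ing the accumulator with an immediate zero through the fastop path. A stand-alone sketch of the arithmetic, outside the emulator (illustrative only):

    #include <stdint.h>

    /*
     * AAM imm8: AH = AL / imm8, AL = AL % imm8; imm8 == 0 raises #DE.
     * Returns 0 on success, -1 where the emulator would inject #DE.
     */
    static int aam(uint8_t *al, uint8_t *ah, uint8_t imm8)
    {
            if (imm8 == 0)
                    return -1;
            *ah = *al / imm8;
            *al = *al % imm8;
            return 0;       /* PF/ZF/SF would follow from the new AL */
    }
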
@@ -3936,7 +3963,10 @@ static const struct opcode opcode_table[256] = {
 	/* 0xD0 - 0xD7 */
 	G(Src2One | ByteOp, group2), G(Src2One, group2),
 	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
-	N, I(DstAcc | SrcImmByte | No64, em_aad), N, N,
+	I(DstAcc | SrcImmUByte | No64, em_aam),
+	I(DstAcc | SrcImmUByte | No64, em_aad),
+	F(DstAcc | ByteOp | No64, em_salc),
+	I(DstAcc | SrcXLat | ByteOp, em_mov),
 	/* 0xD8 - 0xDF */
 	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
 	/* 0xE0 - 0xE7 */
@@ -4198,6 +4228,16 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		op->val = 0;
 		op->count = 1;
 		break;
+	case OpXLat:
+		op->type = OP_MEM;
+		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+		op->addr.mem.ea =
+			register_address(ctxt,
+				reg_read(ctxt, VCPU_REGS_RBX) +
+				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
+		op->addr.mem.seg = seg_override(ctxt);
+		op->val = 0;
+		break;
 	case OpImmFAddr:
 		op->type = OP_IMM;
 		op->addr.mem.ea = ctxt->_eip;
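
XLAT (0xD7) loads AL with the byte at DS:[(E/R)BX + zero-extended AL], honouring any segment override; the OpXLat case above builds exactly that memory operand (RBX plus AL masked to 8 bits, with seg_override()), and the new opcode table entry routes the actual byte move through em_mov. A hedged user-space sketch of the lookup itself, where the table pointer stands in for the segment-based address that the sketch does not model:

    #include <stdint.h>

    /* XLAT reference behaviour: AL = table[AL], AL zero-extended as index. */
    static uint8_t xlat(const uint8_t *table, uint8_t al)
    {
            return table[al];
    }
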
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 25a791ed21c8..260a91939555 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5434,6 +5434,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 			return 0;
 		}
 
+		if (vcpu->arch.halt_request) {
+			vcpu->arch.halt_request = 0;
+			ret = kvm_emulate_halt(vcpu);
+			goto out;
+		}
+
 		if (signal_pending(current))
 			goto out;
 		if (need_resched())
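
handle_invalid_guest_state() emulates one instruction at a time while the guest is in a state the hardware cannot run directly (e.g. real mode without unrestricted guest support). When the emulator hits HLT it only sets vcpu->arch.halt_request, so without the check added above the loop keeps emulating past the halt; the fix consumes the request and calls kvm_emulate_halt() so the vCPU actually stops until the next wakeup event. Roughly, the loop now has this shape (a simplified sketch with stand-in stubs, not the actual kernel code):

    #include <stdbool.h>

    static bool halt_request;                        /* set by the emulator on HLT */

    static void emulate_one_instruction(void) { }
    static void emulate_halt(void) { }               /* vCPU blocks here */
    static bool pending_signal_or_resched(void) { return false; }

    static void invalid_guest_state_loop(const bool *still_invalid)
    {
            while (*still_invalid) {
                    emulate_one_instruction();
                    if (halt_request) {              /* the added check */
                            halt_request = false;
                            emulate_halt();
                            break;
                    }
                    if (pending_signal_or_resched())
                            break;
            }
    }
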
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 05a8b1a2300d..094b5d96ab14 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -555,6 +555,25 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
+static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+			!vcpu->guest_xcr0_loaded) {
+		/* kvm_set_xcr() also depends on this */
+		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+		vcpu->guest_xcr0_loaded = 1;
+	}
+}
+
+static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->guest_xcr0_loaded) {
+		if (vcpu->arch.xcr0 != host_xcr0)
+			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+		vcpu->guest_xcr0_loaded = 0;
+	}
+}
+
 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
 	u64 xcr0;
@@ -571,8 +590,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 		return 1;
 	if (xcr0 & ~host_xcr0)
 		return 1;
+	kvm_put_guest_xcr0(vcpu);
 	vcpu->arch.xcr0 = xcr0;
-	vcpu->guest_xcr0_loaded = 0;
 	return 0;
 }
 
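
The xcr0 change is about restoring host state at the right moment: previously __kvm_set_xcr() overwrote vcpu->arch.xcr0 and merely cleared guest_xcr0_loaded, so the old guest value could be left programmed in the hardware XCR0 with nothing to put it back, corrupting the host's xcr0. Calling kvm_put_guest_xcr0() before the update restores host_xcr0 first, and the next kvm_load_guest_xcr0() then programs the new guest value cleanly; the two helpers are moved earlier in the file (next hunk) so __kvm_set_xcr() can see them. The intended pairing, as a simplified sketch (stand-in types and a model xsetbv, not the kernel's structs or wrappers):

    #include <stdint.h>
    #include <stdbool.h>

    struct vcpu_model {
            uint64_t guest_xcr0;
            bool     guest_xcr0_loaded;
    };

    static uint64_t hw_xcr0;                        /* models the hardware XCR0 */
    static const uint64_t host_xcr0_model = 0x7;    /* assumed host feature mask */

    static void xsetbv_model(uint64_t val) { hw_xcr0 = val; }

    static void load_guest_xcr0(struct vcpu_model *v)
    {
            if (!v->guest_xcr0_loaded) {
                    xsetbv_model(v->guest_xcr0);
                    v->guest_xcr0_loaded = true;
            }
    }

    static void put_guest_xcr0(struct vcpu_model *v)
    {
            if (v->guest_xcr0_loaded) {
                    if (v->guest_xcr0 != host_xcr0_model)
                            xsetbv_model(host_xcr0_model);  /* restore host state */
                    v->guest_xcr0_loaded = false;
            }
    }

    static void set_guest_xcr0(struct vcpu_model *v, uint64_t new_xcr0)
    {
            put_guest_xcr0(v);              /* the fix: restore host XCR0 first */
            v->guest_xcr0 = new_xcr0;       /* next load_guest_xcr0() picks this up */
    }
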
@@ -5614,25 +5633,6 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
-			!vcpu->guest_xcr0_loaded) {
-		/* kvm_set_xcr() also depends on this */
-		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
-		vcpu->guest_xcr0_loaded = 1;
-	}
-}
-
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->guest_xcr0_loaded) {
-		if (vcpu->arch.xcr0 != host_xcr0)
-			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
-		vcpu->guest_xcr0_loaded = 0;
-	}
-}
-
 static void process_nmi(struct kvm_vcpu *vcpu)
 {
 	unsigned limit = 2;