author    Joerg Roedel <joerg.roedel@amd.com>    2011-04-04 06:39:36 -0400
committer Avi Kivity <avi@redhat.com>            2011-05-11 07:57:03 -0400
commit    628afd2aeb286415f6e7d0ee7c87aae249e7f999
tree      740b712e0d3cb32f55bc0df6ecafd1b65233208d /arch
parent    f6511935f424b9a25059ae18e91ad11dd24980e6
KVM: SVM: Remove nested sel_cr0_write handling code
This patch removes all the old code which handled the nested
selective cr0 write intercepts. This code was only in place
as a work-around until the instruction emulator became capable
of doing the same. With this patch-set that is now the case,
so the code can be removed.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
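
For context: the selective cr0 write intercept (SVM_EXIT_CR0_SEL_WRITE)
fires only when a write changes cr0 bits other than TS and MP; writes
that touch only TS/MP do not qualify. The sketch below mirrors the
classification done by the check_selective_cr0_intercepted() helper
added in this patch. It assumes SVM_CR0_SELECTIVE_MASK is
(X86_CR0_TS | X86_CR0_MP), as defined in svm.c; cr0_write_is_selective()
is a hypothetical stand-alone helper for illustration, not part of the
patch.

    /* Stand-alone sketch of the selective cr0 write classification. */
    #include <stdbool.h>
    #include <stdio.h>

    #define X86_CR0_MP              (1UL << 1)   /* Monitor Coprocessor */
    #define X86_CR0_TS              (1UL << 3)   /* Task Switched */
    #define SVM_CR0_SELECTIVE_MASK  (X86_CR0_TS | X86_CR0_MP)

    /* A cr0 write is "selective" when it changes any bit other than TS/MP. */
    static bool cr0_write_is_selective(unsigned long old_cr0, unsigned long new_cr0)
    {
            return ((old_cr0 ^ new_cr0) & ~SVM_CR0_SELECTIVE_MASK) != 0;
    }

    int main(void)
    {
            /* toggling only TS: not selective, prints 0 */
            printf("%d\n", cr0_write_is_selective(0x10, 0x10 | X86_CR0_TS));
            /* toggling PE (bit 0): selective, prints 1 */
            printf("%d\n", cr0_write_is_selective(0x10, 0x11));
            return 0;
    }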
Diffstat (limited to 'arch')
 -rw-r--r--  arch/x86/kvm/svm.c | 78
 1 file changed, 26 insertions(+), 52 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5c6512dbac7c..779b09194f03 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -93,14 +93,6 @@ struct nested_state {
        /* A VMEXIT is required but not yet emulated */
        bool exit_required;

-       /*
-        * If we vmexit during an instruction emulation we need this to restore
-        * the l1 guest rip after the emulation
-        */
-       unsigned long vmexit_rip;
-       unsigned long vmexit_rsp;
-       unsigned long vmexit_rax;
-
        /* cache for intercepts of the guest */
        u32 intercept_cr;
        u32 intercept_dr;
@@ -1362,31 +1354,6 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        struct vcpu_svm *svm = to_svm(vcpu);

-       if (is_guest_mode(vcpu)) {
-               /*
-                * We are here because we run in nested mode, the host kvm
-                * intercepts cr0 writes but the l1 hypervisor does not.
-                * But the L1 hypervisor may intercept selective cr0 writes.
-                * This needs to be checked here.
-                */
-               unsigned long old, new;
-
-               /* Remove bits that would trigger a real cr0 write intercept */
-               old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
-               new = cr0 & SVM_CR0_SELECTIVE_MASK;
-
-               if (old == new) {
-                       /* cr0 write with ts and mp unchanged */
-                       svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
-                       if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
-                               svm->nested.vmexit_rip = kvm_rip_read(vcpu);
-                               svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
-                               svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
-                               return;
-                       }
-               }
-       }
-
 #ifdef CONFIG_X86_64
        if (vcpu->arch.efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
@@ -2673,6 +2640,29 @@ static int emulate_on_interception(struct vcpu_svm *svm)
        return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }

+bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
+{
+       unsigned long cr0 = svm->vcpu.arch.cr0;
+       bool ret = false;
+       u64 intercept;
+
+       intercept = svm->nested.intercept;
+
+       if (!is_guest_mode(&svm->vcpu) ||
+           (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
+               return false;
+
+       cr0 &= ~SVM_CR0_SELECTIVE_MASK;
+       val &= ~SVM_CR0_SELECTIVE_MASK;
+
+       if (cr0 ^ val) {
+               svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+               ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
+       }
+
+       return ret;
+}
+
 #define CR_VALID (1ULL << 63)

 static int cr_interception(struct vcpu_svm *svm)
@@ -2696,7 +2686,8 @@ static int cr_interception(struct vcpu_svm *svm)
        val = kvm_register_read(&svm->vcpu, reg);
        switch (cr) {
        case 0:
-               err = kvm_set_cr0(&svm->vcpu, val);
+               if (!check_selective_cr0_intercepted(svm, val))
+                       err = kvm_set_cr0(&svm->vcpu, val);
                break;
        case 3:
                err = kvm_set_cr3(&svm->vcpu, val);
@@ -2741,23 +2732,6 @@ static int cr_interception(struct vcpu_svm *svm)
        return 1;
 }

-static int cr0_write_interception(struct vcpu_svm *svm)
-{
-       struct kvm_vcpu *vcpu = &svm->vcpu;
-       int r;
-
-       r = cr_interception(svm);
-
-       if (svm->nested.vmexit_rip) {
-               kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
-               kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
-               kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
-               svm->nested.vmexit_rip = 0;
-       }
-
-       return r;
-}
-
 static int dr_interception(struct vcpu_svm *svm)
 {
        int reg, dr;
@@ -3045,7 +3019,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_READ_CR4]                     = cr_interception,
        [SVM_EXIT_READ_CR8]                     = cr_interception,
        [SVM_EXIT_CR0_SEL_WRITE]                = emulate_on_interception,
-       [SVM_EXIT_WRITE_CR0]                    = cr0_write_interception,
+       [SVM_EXIT_WRITE_CR0]                    = cr_interception,
        [SVM_EXIT_WRITE_CR3]                    = cr_interception,
        [SVM_EXIT_WRITE_CR4]                    = cr_interception,
        [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,