aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/svm.c
diff options
context:
space:
mode:
authorAndre Przywara <andre.przywara@amd.com>2010-12-21 05:12:04 -0500
committerAvi Kivity <avi@redhat.com>2011-01-12 04:31:03 -0500
commit7ff76d58a9dc03a38b86d283abcaae2ac3c74fe3 (patch)
treef45fdc961458c760f4d8a64f4f5e9269ce82bc38 /arch/x86/kvm/svm.c
parentddce97aac5405e0b2b8b2191cb65b5a48eb14145 (diff)
KVM: SVM: enhance MOV CR intercept handler
Newer SVM implementations provide the GPR number in the VMCB, so that the emulation path is no longer necessary to handle CR register access intercepts. Implement the handling in svm.c and use it when the info is provided. Signed-off-by: Andre Przywara <andre.przywara@amd.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--arch/x86/kvm/svm.c90
1 files changed, 79 insertions, 11 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2830a73ea736..5abaa5b2f624 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2660,12 +2660,80 @@ static int emulate_on_interception(struct vcpu_svm *svm)
2660 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; 2660 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2661} 2661}
2662 2662
2663#define CR_VALID (1ULL << 63)
2664
2665static int cr_interception(struct vcpu_svm *svm)
2666{
2667 int reg, cr;
2668 unsigned long val;
2669 int err;
2670
2671 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2672 return emulate_on_interception(svm);
2673
2674 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2675 return emulate_on_interception(svm);
2676
2677 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2678 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2679
2680 err = 0;
2681 if (cr >= 16) { /* mov to cr */
2682 cr -= 16;
2683 val = kvm_register_read(&svm->vcpu, reg);
2684 switch (cr) {
2685 case 0:
2686 err = kvm_set_cr0(&svm->vcpu, val);
2687 break;
2688 case 3:
2689 err = kvm_set_cr3(&svm->vcpu, val);
2690 break;
2691 case 4:
2692 err = kvm_set_cr4(&svm->vcpu, val);
2693 break;
2694 case 8:
2695 err = kvm_set_cr8(&svm->vcpu, val);
2696 break;
2697 default:
2698 WARN(1, "unhandled write to CR%d", cr);
2699 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2700 return 1;
2701 }
2702 } else { /* mov from cr */
2703 switch (cr) {
2704 case 0:
2705 val = kvm_read_cr0(&svm->vcpu);
2706 break;
2707 case 2:
2708 val = svm->vcpu.arch.cr2;
2709 break;
2710 case 3:
2711 val = svm->vcpu.arch.cr3;
2712 break;
2713 case 4:
2714 val = kvm_read_cr4(&svm->vcpu);
2715 break;
2716 case 8:
2717 val = kvm_get_cr8(&svm->vcpu);
2718 break;
2719 default:
2720 WARN(1, "unhandled read from CR%d", cr);
2721 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2722 return 1;
2723 }
2724 kvm_register_write(&svm->vcpu, reg, val);
2725 }
2726 kvm_complete_insn_gp(&svm->vcpu, err);
2727
2728 return 1;
2729}
2730
2663static int cr0_write_interception(struct vcpu_svm *svm) 2731static int cr0_write_interception(struct vcpu_svm *svm)
2664{ 2732{
2665 struct kvm_vcpu *vcpu = &svm->vcpu; 2733 struct kvm_vcpu *vcpu = &svm->vcpu;
2666 int r; 2734 int r;
2667 2735
2668 r = emulate_instruction(&svm->vcpu, 0); 2736 r = cr_interception(svm);
2669 2737
2670 if (svm->nested.vmexit_rip) { 2738 if (svm->nested.vmexit_rip) {
2671 kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip); 2739 kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2674,7 +2742,7 @@ static int cr0_write_interception(struct vcpu_svm *svm)
2674 svm->nested.vmexit_rip = 0; 2742 svm->nested.vmexit_rip = 0;
2675 } 2743 }
2676 2744
2677 return r == EMULATE_DONE; 2745 return r;
2678} 2746}
2679 2747
2680static int cr8_write_interception(struct vcpu_svm *svm) 2748static int cr8_write_interception(struct vcpu_svm *svm)
@@ -2684,13 +2752,13 @@ static int cr8_write_interception(struct vcpu_svm *svm)
2684 2752
2685 u8 cr8_prev = kvm_get_cr8(&svm->vcpu); 2753 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
2686 /* instruction emulation calls kvm_set_cr8() */ 2754 /* instruction emulation calls kvm_set_cr8() */
2687 r = emulate_instruction(&svm->vcpu, 0); 2755 r = cr_interception(svm);
2688 if (irqchip_in_kernel(svm->vcpu.kvm)) { 2756 if (irqchip_in_kernel(svm->vcpu.kvm)) {
2689 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); 2757 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
2690 return r == EMULATE_DONE; 2758 return r;
2691 } 2759 }
2692 if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) 2760 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
2693 return r == EMULATE_DONE; 2761 return r;
2694 kvm_run->exit_reason = KVM_EXIT_SET_TPR; 2762 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2695 return 0; 2763 return 0;
2696} 2764}
@@ -2933,14 +3001,14 @@ static int pause_interception(struct vcpu_svm *svm)
2933} 3001}
2934 3002
2935static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = { 3003static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
2936 [SVM_EXIT_READ_CR0] = emulate_on_interception, 3004 [SVM_EXIT_READ_CR0] = cr_interception,
2937 [SVM_EXIT_READ_CR3] = emulate_on_interception, 3005 [SVM_EXIT_READ_CR3] = cr_interception,
2938 [SVM_EXIT_READ_CR4] = emulate_on_interception, 3006 [SVM_EXIT_READ_CR4] = cr_interception,
2939 [SVM_EXIT_READ_CR8] = emulate_on_interception, 3007 [SVM_EXIT_READ_CR8] = cr_interception,
2940 [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, 3008 [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
2941 [SVM_EXIT_WRITE_CR0] = cr0_write_interception, 3009 [SVM_EXIT_WRITE_CR0] = cr0_write_interception,
2942 [SVM_EXIT_WRITE_CR3] = emulate_on_interception, 3010 [SVM_EXIT_WRITE_CR3] = cr_interception,
2943 [SVM_EXIT_WRITE_CR4] = emulate_on_interception, 3011 [SVM_EXIT_WRITE_CR4] = cr_interception,
2944 [SVM_EXIT_WRITE_CR8] = cr8_write_interception, 3012 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
2945 [SVM_EXIT_READ_DR0] = emulate_on_interception, 3013 [SVM_EXIT_READ_DR0] = emulate_on_interception,
2946 [SVM_EXIT_READ_DR1] = emulate_on_interception, 3014 [SVM_EXIT_READ_DR1] = emulate_on_interception,