author    Joerg Roedel <joerg.roedel@amd.com>    2011-04-04 06:39:28 -0400
committer Avi Kivity <avi@redhat.com>            2011-05-11 07:57:01 -0400
commit    cfec82cb7d313ae5b2c2dbb974401d7c214c7b09 (patch)
tree      63351befc6b9981127ac8dd2d22149d3ddf75806 /arch/x86
parent    8a76d7f25f8f24fc5a328c8e15e4a7313cf141b9 (diff)
KVM: SVM: Add intercept check for emulated cr accesses
This patch adds all necessary intercept checks for instructions that access the crX registers.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h |   3
-rw-r--r--  arch/x86/include/asm/kvm_host.h    |  15
-rw-r--r--  arch/x86/kvm/emulate.c             | 105
-rw-r--r--  arch/x86/kvm/svm.c                 |  81
-rw-r--r--  arch/x86/kvm/x86.c                 |  13
5 files changed, 192 insertions(+), 25 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index eb7033cefe8e..2c0b5b47464f 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -304,6 +304,9 @@ enum x86_intercept_stage {
 
 enum x86_intercept {
 	x86_intercept_none,
+	x86_intercept_cr_read,
+	x86_intercept_cr_write,
+	x86_intercept_clts,
 	x86_intercept_lmsw,
 	x86_intercept_smsw,
 	x86_intercept_lidt,
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 038562c222e8..f7dfd6479d02 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -35,10 +35,25 @@
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
 
+#define CR0_RESERVED_BITS                                               \
+	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
+			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
+			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
+
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
 				  0xFFFFFF0000000000ULL)
+#define CR4_RESERVED_BITS                                               \
+	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
+			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
+			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
+			  | X86_CR4_OSXSAVE \
+			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
+
+#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
+
+
 
 #define INVALID_PAGE (~(hpa_t)0)
 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
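The CR0/CR4/CR8 reserved-bit masks move from x86.c into kvm_host.h (the x86.c copies are deleted at the end of this patch) so that emulate.c can reuse them in the new checks. As a quick, self-contained illustration of how such an inverted "allowed bits" mask rejects a write that sets a reserved bit — the X86_CR0_* constants are redefined locally here for the sketch; the kernel takes them from <asm/processor-flags.h>:

/* Illustrative userspace sketch, not part of the patch. */
#include <stdio.h>

#define X86_CR0_PE (1UL << 0)
#define X86_CR0_MP (1UL << 1)
#define X86_CR0_EM (1UL << 2)
#define X86_CR0_TS (1UL << 3)
#define X86_CR0_ET (1UL << 4)
#define X86_CR0_NE (1UL << 5)
#define X86_CR0_WP (1UL << 16)
#define X86_CR0_AM (1UL << 18)
#define X86_CR0_NW (1UL << 29)
#define X86_CR0_CD (1UL << 30)
#define X86_CR0_PG (1UL << 31)

#define CR0_RESERVED_BITS \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

int main(void)
{
	unsigned long ok  = X86_CR0_PE | X86_CR0_PG;	/* only architected bits */
	unsigned long bad = ok | (1UL << 7);		/* bit 7 is reserved in CR0 */

	printf("ok:  %s\n", (ok  & CR0_RESERVED_BITS) ? "inject #GP" : "accept");
	printf("bad: %s\n", (bad & CR0_RESERVED_BITS) ? "inject #GP" : "accept");
	return 0;
}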
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e3e96eada6f3..d2e77755efe8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2445,6 +2445,95 @@ static int em_movdqu(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
+static bool valid_cr(int nr)
+{
+	switch (nr) {
+	case 0:
+	case 2 ... 4:
+	case 8:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int check_cr_read(struct x86_emulate_ctxt *ctxt)
+{
+	struct decode_cache *c = &ctxt->decode;
+
+	if (!valid_cr(c->modrm_reg))
+		return emulate_ud(ctxt);
+
+	return X86EMUL_CONTINUE;
+}
+
+static int check_cr_write(struct x86_emulate_ctxt *ctxt)
+{
+	struct decode_cache *c = &ctxt->decode;
+	u64 new_val = c->src.val64;
+	int cr = c->modrm_reg;
+
+	static u64 cr_reserved_bits[] = {
+		0xffffffff00000000ULL,
+		0, 0, 0, /* CR3 checked later */
+		CR4_RESERVED_BITS,
+		0, 0, 0,
+		CR8_RESERVED_BITS,
+	};
+
+	if (!valid_cr(cr))
+		return emulate_ud(ctxt);
+
+	if (new_val & cr_reserved_bits[cr])
+		return emulate_gp(ctxt, 0);
+
+	switch (cr) {
+	case 0: {
+		u64 cr4, efer;
+		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
+		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
+			return emulate_gp(ctxt, 0);
+
+		cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
+		ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);
+
+		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
+		    !(cr4 & X86_CR4_PAE))
+			return emulate_gp(ctxt, 0);
+
+		break;
+		}
+	case 3: {
+		u64 rsvd = 0;
+
+		if (is_long_mode(ctxt->vcpu))
+			rsvd = CR3_L_MODE_RESERVED_BITS;
+		else if (is_pae(ctxt->vcpu))
+			rsvd = CR3_PAE_RESERVED_BITS;
+		else if (is_paging(ctxt->vcpu))
+			rsvd = CR3_NONPAE_RESERVED_BITS;
+
+		if (new_val & rsvd)
+			return emulate_gp(ctxt, 0);
+
+		break;
+		}
+	case 4: {
+		u64 cr4, efer;
+
+		cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
+		ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);
+
+		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
+			return emulate_gp(ctxt, 0);
+
+		break;
+		}
+	}
+
+	return X86EMUL_CONTINUE;
+}
+
 #define D(_y) { .flags = (_y) }
 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
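check_cr_read()/check_cr_write() run from the emulator's intercept machinery before the access is actually performed, so malformed accesses raise #UD or #GP instead of ever reaching ->get_cr()/->set_cr(). The following standalone sketch (illustrative only; the CR0 consistency rules are copied from check_cr_write() above and the X86_CR0_* bit positions are defined locally) shows which CR0 values the new check would refuse:

/* Illustrative userspace sketch, not part of the patch. */
#include <stdio.h>
#include <stdbool.h>

#define X86_CR0_PE (1UL << 0)
#define X86_CR0_NW (1UL << 29)
#define X86_CR0_CD (1UL << 30)
#define X86_CR0_PG (1UL << 31)

static bool cr0_write_ok(unsigned long new_val)
{
	if ((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE))
		return false;	/* paging without protected mode */
	if ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))
		return false;	/* not-write-through without cache disable */
	return true;
}

int main(void)
{
	unsigned long vals[] = {
		X86_CR0_PE | X86_CR0_PG,	/* legal: protected mode + paging */
		X86_CR0_PG,			/* illegal: PG set, PE clear */
		X86_CR0_PE | X86_CR0_NW,	/* illegal: NW set, CD clear */
	};

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("cr0=%#lx -> %s\n", vals[i],
		       cr0_write_ok(vals[i]) ? "accept" : "inject #GP");
	return 0;
}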
@@ -2632,14 +2721,16 @@ static struct opcode opcode_table[256] = {
 static struct opcode twobyte_table[256] = {
 	/* 0x00 - 0x0F */
 	N, GD(0, &group7), N, N,
-	N, D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv), N,
+	N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N,
 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
 	N, D(ImplicitOps | ModRM), N, N,
 	/* 0x10 - 0x1F */
 	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
 	/* 0x20 - 0x2F */
-	D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
-	D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
+	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
+	D(ModRM | DstMem | Priv | Op3264),
+	DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
+	D(ModRM | SrcMem | Priv | Op3264),
 	N, N, N, N,
 	N, N, N, N, N, N, N, N,
 	/* 0x30 - 0x3F */
@@ -3724,14 +3815,6 @@ twobyte_insn:
 	case 0x18: /* Grp16 (prefetch/nop) */
 		break;
 	case 0x20: /* mov cr, reg */
-		switch (c->modrm_reg) {
-		case 1:
-		case 5 ... 7:
-		case 9 ... 15:
-			emulate_ud(ctxt);
-			rc = X86EMUL_PROPAGATE_FAULT;
-			goto done;
-		}
 		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
 		break;
 	case 0x21: /* mov from dr to reg */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 798ebe695f1d..ff4ed3619d00 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3868,11 +3868,90 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
 	update_cr0_intercept(svm);
 }
 
+#define POST_EX(exit) { .exit_code = (exit), \
+			.stage = X86_ICPT_POST_EXCEPT, \
+			.valid = true }
+
+static struct __x86_intercept {
+	u32 exit_code;
+	enum x86_intercept_stage stage;
+	bool valid;
+} x86_intercept_map[] = {
+	[x86_intercept_cr_read]		= POST_EX(SVM_EXIT_READ_CR0),
+	[x86_intercept_cr_write]	= POST_EX(SVM_EXIT_WRITE_CR0),
+	[x86_intercept_clts]		= POST_EX(SVM_EXIT_WRITE_CR0),
+	[x86_intercept_lmsw]		= POST_EX(SVM_EXIT_WRITE_CR0),
+	[x86_intercept_smsw]		= POST_EX(SVM_EXIT_READ_CR0),
+};
+
+#undef POST_EX
+
 static int svm_check_intercept(struct kvm_vcpu *vcpu,
 			       struct x86_instruction_info *info,
 			       enum x86_intercept_stage stage)
 {
-	return X86EMUL_CONTINUE;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	int vmexit, ret = X86EMUL_CONTINUE;
+	struct __x86_intercept icpt_info;
+	struct vmcb *vmcb = svm->vmcb;
+
+	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
+		goto out;
+
+	icpt_info = x86_intercept_map[info->intercept];
+
+	if (!icpt_info.valid || stage != icpt_info.stage)
+		goto out;
+
+	switch (icpt_info.exit_code) {
+	case SVM_EXIT_READ_CR0:
+		if (info->intercept == x86_intercept_cr_read)
+			icpt_info.exit_code += info->modrm_reg;
+		break;
+	case SVM_EXIT_WRITE_CR0: {
+		unsigned long cr0, val;
+		u64 intercept;
+
+		if (info->intercept == x86_intercept_cr_write)
+			icpt_info.exit_code += info->modrm_reg;
+
+		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
+			break;
+
+		intercept = svm->nested.intercept;
+
+		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
+			break;
+
+		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
+		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
+
+		if (info->intercept == x86_intercept_lmsw) {
+			cr0 &= 0xfUL;
+			val &= 0xfUL;
+			/* lmsw can't clear PE - catch this here */
+			if (cr0 & X86_CR0_PE)
+				val |= X86_CR0_PE;
+		}
+
+		if (cr0 ^ val)
+			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+
+		break;
+	}
+	default:
+		break;
+	}
+
+	vmcb->control.next_rip  = info->next_rip;
+	vmcb->control.exit_code = icpt_info.exit_code;
+	vmexit = nested_svm_exit_handled(svm);
+
+	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
+					   : X86EMUL_CONTINUE;
+
+out:
+	return ret;
 }
 
 static struct kvm_x86_ops svm_x86_ops = {
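svm_check_intercept() maps the generic x86_intercept_cr_read/cr_write IDs onto per-register #VMEXIT codes by adding the instruction's ModRM reg field to the CR0 exit code, then asks nested_svm_exit_handled() whether the L1 hypervisor intercepts that exit; selective CR0 writes that actually change a monitored bit are upgraded to SVM_EXIT_CR0_SEL_WRITE. A standalone sketch of the exit-code arithmetic (constant values as defined in arch/x86/include/asm/svm.h; the program itself is only illustrative):

/* Illustrative userspace sketch, not part of the patch. */
#include <stdio.h>

#define SVM_EXIT_READ_CR0	0x000	/* CR0..CR15 read exits are consecutive */
#define SVM_EXIT_WRITE_CR0	0x010	/* CR0..CR15 write exits are consecutive */

int main(void)
{
	int modrm_reg = 4;	/* e.g. "mov %rax, %cr4" decodes reg = 4 */

	printf("read  CR%d -> exit code %#05x\n",
	       modrm_reg, (unsigned int)(SVM_EXIT_READ_CR0 + modrm_reg));
	printf("write CR%d -> exit code %#05x\n",
	       modrm_reg, (unsigned int)(SVM_EXIT_WRITE_CR0 + modrm_reg));
	return 0;
}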
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eebe5465c8ce..0d6524fa2aff 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -60,19 +60,6 @@
 #include <asm/div64.h>
 
 #define MAX_IO_MSRS 256
-#define CR0_RESERVED_BITS						\
-	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
-			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
-			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
-#define CR4_RESERVED_BITS						\
-	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
-			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
-			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
-			  | X86_CR4_OSXSAVE \
-			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
-
-#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
-
 #define KVM_MAX_MCE_BANKS 32
 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
 