diff options
author | Rusty Russell <rusty@rustcorp.com.au> | 2007-07-17 09:34:16 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2007-10-13 04:18:18 -0400 |
commit | 66aee91aaab8f998d28a61ed7733be17ad8e6d8f (patch) | |
tree | f3cd552c4a176cbba0929788b03867cb33d7b5b5 /drivers/kvm/kvm_main.c | |
parent | f802a307cb2cabdd0c6b48067dbe901d6fe27246 (diff) |
KVM: Use standard CR4 flags, tighten checking
On this machine (Intel), writing to the CR4 bits 0x00000800 and
0x00001000 causes a GPF. The Intel manual is a little unclear, but
AFAICT they're reserved, too.
Also fix spelling of CR4_RESEVED_BITS.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r-- | drivers/kvm/kvm_main.c | 16 |
1 files changed, 10 insertions, 6 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c index 34a571dee514..af02320012c9 100644 --- a/drivers/kvm/kvm_main.c +++ b/drivers/kvm/kvm_main.c | |||
@@ -86,8 +86,12 @@ static struct dentry *debugfs_dir; | |||
86 | (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \ | 86 | (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \ |
87 | | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \ | 87 | | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \ |
88 | | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG)) | 88 | | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG)) |
89 | #define LMSW_GUEST_MASK 0x0eULL | 89 | #define CR4_RESERVED_BITS \ |
90 | #define CR4_RESEVED_BITS (~((1ULL << 11) - 1)) | 90 | (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ |
91 | | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ | ||
92 | | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \ | ||
93 | | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE)) | ||
94 | |||
91 | #define CR8_RESEVED_BITS (~0x0fULL) | 95 | #define CR8_RESEVED_BITS (~0x0fULL) |
92 | #define EFER_RESERVED_BITS 0xfffffffffffff2fe | 96 | #define EFER_RESERVED_BITS 0xfffffffffffff2fe |
93 | 97 | ||
@@ -537,26 +541,26 @@ EXPORT_SYMBOL_GPL(lmsw); | |||
537 | 541 | ||
538 | void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | 542 | void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
539 | { | 543 | { |
540 | if (cr4 & CR4_RESEVED_BITS) { | 544 | if (cr4 & CR4_RESERVED_BITS) { |
541 | printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n"); | 545 | printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n"); |
542 | inject_gp(vcpu); | 546 | inject_gp(vcpu); |
543 | return; | 547 | return; |
544 | } | 548 | } |
545 | 549 | ||
546 | if (is_long_mode(vcpu)) { | 550 | if (is_long_mode(vcpu)) { |
547 | if (!(cr4 & CR4_PAE_MASK)) { | 551 | if (!(cr4 & X86_CR4_PAE)) { |
548 | printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while " | 552 | printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while " |
549 | "in long mode\n"); | 553 | "in long mode\n"); |
550 | inject_gp(vcpu); | 554 | inject_gp(vcpu); |
551 | return; | 555 | return; |
552 | } | 556 | } |
553 | } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK) | 557 | } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE) |
554 | && !load_pdptrs(vcpu, vcpu->cr3)) { | 558 | && !load_pdptrs(vcpu, vcpu->cr3)) { |
555 | printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); | 559 | printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); |
556 | inject_gp(vcpu); | 560 | inject_gp(vcpu); |
557 | } | 561 | } |
558 | 562 | ||
559 | if (cr4 & CR4_VMXE_MASK) { | 563 | if (cr4 & X86_CR4_VMXE) { |
560 | printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n"); | 564 | printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n"); |
561 | inject_gp(vcpu); | 565 | inject_gp(vcpu); |
562 | return; | 566 | return; |