author     Rusty Russell <rusty@rustcorp.com.au>    2007-07-17 09:34:16 -0400
committer  Avi Kivity <avi@qumranet.com>            2007-10-13 04:18:18 -0400
commit     66aee91aaab8f998d28a61ed7733be17ad8e6d8f
tree       f3cd552c4a176cbba0929788b03867cb33d7b5b5
parent     f802a307cb2cabdd0c6b48067dbe901d6fe27246

KVM: Use standard CR4 flags, tighten checking

On this machine (Intel), writing to CR4 bits 0x00000800 and
0x00001000 causes a GPF.  The Intel manual is a little unclear, but
AFAICT they're reserved, too.

Also fix the spelling of CR4_RESEVED_BITS.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
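For illustration only, not part of the patch: a minimal, self-contained C sketch of the reserved-bit check that the new CR4_RESERVED_BITS definition in kvm_main.c implements. The X86_CR4_* values are the standard x86 flag positions; cr4_has_reserved_bits() and the main() driver are hypothetical names used just for this demo. The real set_cr4() shown in the diff below additionally rejects VMXE and validates PAE/PDPTR state.

#include <stdio.h>

/* Standard x86 CR4 flag bits (same values as the kernel's X86_CR4_* macros),
 * repeated here so the sketch compiles on its own. */
#define X86_CR4_VME        0x00000001UL
#define X86_CR4_PVI        0x00000002UL
#define X86_CR4_TSD        0x00000004UL
#define X86_CR4_DE         0x00000008UL
#define X86_CR4_PSE        0x00000010UL
#define X86_CR4_PAE        0x00000020UL
#define X86_CR4_MCE        0x00000040UL
#define X86_CR4_PGE        0x00000080UL
#define X86_CR4_PCE        0x00000100UL
#define X86_CR4_OSFXSR     0x00000200UL
#define X86_CR4_OSXMMEXCPT 0x00000400UL
#define X86_CR4_VMXE       0x00002000UL

/* Everything outside the explicitly allowed flags is reserved, mirroring the
 * CR4_RESERVED_BITS definition added by the patch. */
#define CR4_RESERVED_BITS \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

/* Hypothetical stand-in for the first test in set_cr4(): returns nonzero if
 * the guest value touches a reserved bit and would earn a #GP. */
static int cr4_has_reserved_bits(unsigned long cr4)
{
	return (cr4 & CR4_RESERVED_BITS) != 0;
}

int main(void)
{
	/* Bits 11 (0x800) and 12 (0x1000) are not in the allowed set, so the
	 * values from the commit message are rejected. */
	printf("0x00000800 -> %s\n",
	       cr4_has_reserved_bits(0x800UL) ? "#GP" : "ok");
	printf("0x00001000 -> %s\n",
	       cr4_has_reserved_bits(0x1000UL) ? "#GP" : "ok");
	printf("PAE|PGE    -> %s\n",
	       cr4_has_reserved_bits(X86_CR4_PAE | X86_CR4_PGE) ? "#GP" : "ok");
	return 0;
}
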
-rw-r--r--  drivers/kvm/kvm.h      | 16
-rw-r--r--  drivers/kvm/kvm_main.c | 16
-rw-r--r--  drivers/kvm/svm.c      |  7
-rw-r--r--  drivers/kvm/vmx.c      |  8
-rw-r--r--  drivers/kvm/vmx.h      |  2
5 files changed, 22 insertions, 27 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 983c33f38377..25439a5968f9 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -23,12 +23,6 @@
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
 
-#define CR4_VME_MASK (1ULL << 0)
-#define CR4_PSE_MASK (1ULL << 4)
-#define CR4_PAE_MASK (1ULL << 5)
-#define CR4_PGE_MASK (1ULL << 7)
-#define CR4_VMXE_MASK (1ULL << 13)
-
 #define KVM_GUEST_CR0_MASK \
 	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
 	 | X86_CR0_NW | X86_CR0_CD)
@@ -36,9 +30,9 @@
 	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
 	 | X86_CR0_MP)
 #define KVM_GUEST_CR4_MASK \
-	(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
-#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
-#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)
+	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
+#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
+#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
 
 #define INVALID_PAGE (~(hpa_t)0)
 #define UNMAPPED_GVA (~(gpa_t)0)
@@ -645,12 +639,12 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
 
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr4 & CR4_PAE_MASK;
+	return vcpu->cr4 & X86_CR4_PAE;
 }
 
 static inline int is_pse(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr4 & CR4_PSE_MASK;
+	return vcpu->cr4 & X86_CR4_PSE;
 }
 
 static inline int is_paging(struct kvm_vcpu *vcpu)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 34a571dee514..af02320012c9 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -86,8 +86,12 @@ static struct dentry *debugfs_dir;
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
 			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
 			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
-#define LMSW_GUEST_MASK 0x0eULL
-#define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
+#define CR4_RESERVED_BITS \
+	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
+			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
+			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
+			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
+
 #define CR8_RESEVED_BITS (~0x0fULL)
 #define EFER_RESERVED_BITS 0xfffffffffffff2fe
 
@@ -537,26 +541,26 @@ EXPORT_SYMBOL_GPL(lmsw);
 
 void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	if (cr4 & CR4_RESEVED_BITS) {
+	if (cr4 & CR4_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
 		inject_gp(vcpu);
 		return;
 	}
 
 	if (is_long_mode(vcpu)) {
-		if (!(cr4 & CR4_PAE_MASK)) {
+		if (!(cr4 & X86_CR4_PAE)) {
 			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
 			       "in long mode\n");
 			inject_gp(vcpu);
 			return;
 		}
-	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
+	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
 		   && !load_pdptrs(vcpu, vcpu->cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		inject_gp(vcpu);
 	}
 
-	if (cr4 & CR4_VMXE_MASK) {
+	if (cr4 & X86_CR4_VMXE) {
 		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
 		inject_gp(vcpu);
 		return;
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index e920c2269af0..5c058fa1c8ad 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -38,7 +38,6 @@ MODULE_LICENSE("GPL");
 
 #define DR7_GD_MASK (1 << 13)
 #define DR6_BD_MASK (1 << 13)
-#define CR4_DE_MASK (1UL << 3)
 
 #define SEG_TYPE_LDT 2
 #define SEG_TYPE_BUSY_TSS16 3
@@ -564,7 +563,7 @@ static void init_vmcb(struct vmcb *vmcb)
 	 * cache by default. the orderly way is to enable cache in bios.
 	 */
 	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
-	save->cr4 = CR4_PAE_MASK;
+	save->cr4 = X86_CR4_PAE;
 	/* rdx = ?? */
 }
 
@@ -781,7 +780,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	vcpu->cr4 = cr4;
-	vcpu->svm->vmcb->save.cr4 = cr4 | CR4_PAE_MASK;
+	vcpu->svm->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -877,7 +876,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 		vcpu->svm->db_regs[dr] = value;
 		return;
 	case 4 ... 5:
-		if (vcpu->cr4 & CR4_DE_MASK) {
+		if (vcpu->cr4 & X86_CR4_DE) {
 			*exception = UD_VECTOR;
 			return;
 		}
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index ebd93b4775af..f3e78187e892 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -764,7 +764,7 @@ static void hardware_enable(void *garbage)
 	if ((old & 5) != 5)
 		/* enable and lock */
 		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
-	write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
+	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
 	asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
 		      : "memory", "cc");
 }
@@ -879,8 +879,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 	flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
 	vmcs_writel(GUEST_RFLAGS, flags);
 
-	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~CR4_VME_MASK) |
-			(vmcs_readl(CR4_READ_SHADOW) & CR4_VME_MASK));
+	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
+			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
 
 	update_exception_bitmap(vcpu);
 
@@ -937,7 +937,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	flags |= IOPL_MASK | X86_EFLAGS_VM;
 
 	vmcs_writel(GUEST_RFLAGS, flags);
-	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | CR4_VME_MASK);
+	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
 	update_exception_bitmap(vcpu);
 
 	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
diff --git a/drivers/kvm/vmx.h b/drivers/kvm/vmx.h
index d0dc93df411b..76ad7933cded 100644
--- a/drivers/kvm/vmx.h
+++ b/drivers/kvm/vmx.h
@@ -285,8 +285,6 @@ enum vmcs_field {
 
 #define AR_RESERVD_MASK 0xfffe0f00
 
-#define CR4_VMXE 0x2000
-
 #define MSR_IA32_VMX_BASIC 0x480
 #define MSR_IA32_FEATURE_CONTROL 0x03a
 #define MSR_IA32_VMX_PINBASED_CTLS 0x481