-rw-r--r--  drivers/kvm/kvm.h      | 19
-rw-r--r--  drivers/kvm/kvm_main.c | 15
-rw-r--r--  drivers/kvm/mmu.c      |  2
-rw-r--r--  drivers/kvm/svm.c      | 20
-rw-r--r--  drivers/kvm/vmx.c      | 22
5 files changed, 36 insertions(+), 42 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index b629a83eb82d..7117c3b3cca7 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -19,15 +19,6 @@
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
 
-#define CR0_PE_MASK (1ULL << 0)
-#define CR0_MP_MASK (1ULL << 1)
-#define CR0_TS_MASK (1ULL << 3)
-#define CR0_NE_MASK (1ULL << 5)
-#define CR0_WP_MASK (1ULL << 16)
-#define CR0_NW_MASK (1ULL << 29)
-#define CR0_CD_MASK (1ULL << 30)
-#define CR0_PG_MASK (1ULL << 31)
-
 #define CR3_WPT_MASK (1ULL << 3)
 #define CR3_PCD_MASK (1ULL << 4)
 
@@ -42,11 +33,11 @@
 #define CR4_VMXE_MASK (1ULL << 13)
 
 #define KVM_GUEST_CR0_MASK \
-	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
-	 | CR0_NW_MASK | CR0_CD_MASK)
+	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
+	 | X86_CR0_NW | X86_CR0_CD)
 #define KVM_VM_CR0_ALWAYS_ON \
-	(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK | CR0_TS_MASK \
-	 | CR0_MP_MASK)
+	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
+	 | X86_CR0_MP)
 #define KVM_GUEST_CR4_MASK \
 	(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
@@ -667,7 +658,7 @@ static inline int is_pse(struct kvm_vcpu *vcpu)
 
 static inline int is_paging(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr0 & CR0_PG_MASK;
+	return vcpu->cr0 & X86_CR0_PG;
 }
 
 static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
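
The two surviving macros split CR0 ownership between host and guest: bits in KVM_GUEST_CR0_MASK are host-controlled (guest writes to them trap), while KVM_VM_CR0_ALWAYS_ON bits are forced on in the CR0 the hardware actually runs with, and the guest-written value is kept in a read shadow. A minimal standalone sketch of how vmx_set_cr0() further below combines them; the main() harness and the sample reset value are illustrative, not from the patch:

	#include <stdio.h>

	#define X86_CR0_PE (1UL << 0)
	#define X86_CR0_MP (1UL << 1)
	#define X86_CR0_TS (1UL << 3)
	#define X86_CR0_NE (1UL << 5)
	#define X86_CR0_WP (1UL << 16)
	#define X86_CR0_NW (1UL << 29)
	#define X86_CR0_CD (1UL << 30)
	#define X86_CR0_PG (1UL << 31)

	#define KVM_GUEST_CR0_MASK \
		(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
		 | X86_CR0_NW | X86_CR0_CD)
	#define KVM_VM_CR0_ALWAYS_ON \
		(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
		 | X86_CR0_TS | X86_CR0_MP)

	int main(void)
	{
		/* Value a guest might write; 0x60000010 is the x86 reset CR0. */
		unsigned long guest_cr0 = 0x60000010UL;

		/* Host-owned bits are stripped, always-on bits are forced. */
		unsigned long hw_cr0 = (guest_cr0 & ~KVM_GUEST_CR0_MASK)
				       | KVM_VM_CR0_ALWAYS_ON;

		printf("guest sees 0x%lx, hardware runs with 0x%lx\n",
		       guest_cr0, hw_cr0);
		return 0;
	}
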
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 326fa79fbebf..5d8febe580de 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -82,7 +82,10 @@ static struct dentry *debugfs_dir;
 
 #define MAX_IO_MSRS 256
 
-#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
+#define CR0_RESERVED_BITS \
+	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
+			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
+			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
 #define LMSW_GUEST_MASK 0x0eULL
 #define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
 #define CR8_RESEVED_BITS (~0x0fULL)
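
The old magic constant and the new symbolic form are numerically identical wherever unsigned long is 64 bits wide, which is easy to verify in isolation. A standalone check using the architectural CR0 bit positions; none of this is KVM code:

	#include <assert.h>
	#include <stdio.h>

	#define X86_CR0_PE (1UL << 0)
	#define X86_CR0_MP (1UL << 1)
	#define X86_CR0_EM (1UL << 2)
	#define X86_CR0_TS (1UL << 3)
	#define X86_CR0_ET (1UL << 4)
	#define X86_CR0_NE (1UL << 5)
	#define X86_CR0_WP (1UL << 16)
	#define X86_CR0_AM (1UL << 18)
	#define X86_CR0_NW (1UL << 29)
	#define X86_CR0_CD (1UL << 30)
	#define X86_CR0_PG (1UL << 31)

	int main(void)
	{
		unsigned long reserved =
			~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM
					 | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE
					 | X86_CR0_WP | X86_CR0_AM | X86_CR0_NW
					 | X86_CR0_CD | X86_CR0_PG);

		/* Reproduces the old CR0_RESEVED_BITS value on a 64-bit build. */
		assert(sizeof(unsigned long) == 8);
		assert(reserved == 0xffffffff1ffaffc0UL);
		printf("CR0_RESERVED_BITS = 0x%lx\n", reserved);
		return 0;
	}
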
@@ -466,27 +469,27 @@ out:
 
 void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
-	if (cr0 & CR0_RESEVED_BITS) {
+	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
 		       cr0, vcpu->cr0);
 		inject_gp(vcpu);
 		return;
 	}
 
-	if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
+	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
 		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
 		inject_gp(vcpu);
 		return;
 	}
 
-	if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
+	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
 		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
 		       "and a clear PE flag\n");
 		inject_gp(vcpu);
 		return;
 	}
 
-	if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
+	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
 	if ((vcpu->shadow_efer & EFER_LME)) {
 		int cs_db, cs_l;
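
Before anything else, set_cr0() rejects two architecturally invalid combinations: NW=1 with CD=0, and PG=1 with PE=0. A compact restatement of just those invariants; cr0_combination_valid() is an illustrative name, not a kernel function:

	#define X86_CR0_PE (1UL << 0)
	#define X86_CR0_NW (1UL << 29)
	#define X86_CR0_CD (1UL << 30)
	#define X86_CR0_PG (1UL << 31)

	/* Returns 1 if the value passes the two consistency checks above. */
	static int cr0_combination_valid(unsigned long cr0)
	{
		if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
			return 0;	/* no-fill cache mode requires CD=1 */
		if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
			return 0;	/* paging requires protected mode */
		return 1;
	}
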
@@ -1158,7 +1161,7 @@ int emulate_clts(struct kvm_vcpu *vcpu)
 {
 	unsigned long cr0;
 
-	cr0 = vcpu->cr0 & ~CR0_TS_MASK;
+	cr0 = vcpu->cr0 & ~X86_CR0_TS;
 	kvm_arch_ops->set_cr0(vcpu, cr0);
 	return X86EMUL_CONTINUE;
 }
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 23965aa5ee78..75faef4fb086 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -158,7 +158,7 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-	return vcpu->cr0 & CR0_WP_MASK;
+	return vcpu->cr0 & X86_CR0_WP;
 }
 
 static int is_cpuid_PSE36(void)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 52a11ccdf0c2..e920c2269af0 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -99,7 +99,7 @@ static unsigned get_addr_size(struct kvm_vcpu *vcpu)
 	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
 	u16 cs_attrib;
 
-	if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
+	if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
 		return 2;
 
 	cs_attrib = sa->cs.attrib;
@@ -563,7 +563,7 @@ static void init_vmcb(struct vmcb *vmcb)
 	 * cr0 val on cpu init should be 0x60000010, we enable cpu
 	 * cache by default. the orderly way is to enable cache in bios.
 	 */
-	save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
+	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
 	save->cr4 = CR4_PAE_MASK;
 	/* rdx = ?? */
 }
@@ -756,25 +756,25 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 #ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & KVM_EFER_LME) {
-		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
+		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 			vcpu->shadow_efer |= KVM_EFER_LMA;
 			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
 		}
 
-		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK) ) {
+		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
 			vcpu->shadow_efer &= ~KVM_EFER_LMA;
 			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
 		}
 	}
 #endif
-	if ((vcpu->cr0 & CR0_TS_MASK) && !(cr0 & CR0_TS_MASK)) {
+	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
 		vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 		vcpu->fpu_active = 1;
 	}
 
 	vcpu->cr0 = cr0;
-	cr0 |= CR0_PG_MASK | CR0_WP_MASK;
-	cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK);
+	cr0 |= X86_CR0_PG | X86_CR0_WP;
+	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	vcpu->svm->vmcb->save.cr0 = cr0;
 }
 
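
The #ifdef CONFIG_X86_64 block above mirrors the architectural long-mode rule: with EFER.LME set, turning CR0.PG on activates long mode (EFER.LMA becomes set), and turning PG off deactivates it. A distilled sketch of that transition, using the standard EFER bit positions (LME is bit 8, LMA is bit 10); efer_after_cr0_write() is an illustrative name:

	#define X86_CR0_PG (1UL << 31)
	#define EFER_LME   (1UL << 8)
	#define EFER_LMA   (1UL << 10)

	static unsigned long efer_after_cr0_write(unsigned long efer,
						  unsigned long old_cr0,
						  unsigned long new_cr0)
	{
		int was_paging = !!(old_cr0 & X86_CR0_PG);
		int now_paging = !!(new_cr0 & X86_CR0_PG);

		if (efer & EFER_LME) {
			if (!was_paging && now_paging)
				efer |= EFER_LMA;	/* entering long mode */
			if (was_paging && !now_paging)
				efer &= ~EFER_LMA;	/* leaving long mode */
		}
		return efer;
	}
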
@@ -945,8 +945,8 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(vcpu->cr0 & CR0_TS_MASK))
-		vcpu->svm->vmcb->save.cr0 &= ~CR0_TS_MASK;
+	if (!(vcpu->cr0 & X86_CR0_TS))
+		vcpu->svm->vmcb->save.cr0 &= ~X86_CR0_TS;
 	vcpu->fpu_active = 1;
 
 	return 1;
@@ -1702,7 +1702,7 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 
 	if (vcpu->fpu_active) {
 		vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
-		vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK;
+		vcpu->svm->vmcb->save.cr0 |= X86_CR0_TS;
 		vcpu->fpu_active = 0;
 	}
 }
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 7fa62c780ce4..ebd93b4775af 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -436,9 +436,9 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 	if (vcpu->fpu_active)
 		return;
 	vcpu->fpu_active = 1;
-	vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
-	if (vcpu->cr0 & CR0_TS_MASK)
-		vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
+	if (vcpu->cr0 & X86_CR0_TS)
+		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
 
@@ -447,7 +447,7 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 	if (!vcpu->fpu_active)
 		return;
 	vcpu->fpu_active = 0;
-	vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
 
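
Together, vmx_fpu_activate()/vmx_fpu_deactivate() implement lazy FPU switching: while the guest is not using the FPU, CR0.TS is kept set in the hardware CR0 so the first FPU instruction faults with #NM, which the host intercepts (the SVM hunks above do the same via the NM_VECTOR intercept bit). A simplified model of the state machine; struct fake_vcpu and these helpers are illustrative, not KVM code:

	#define X86_CR0_TS (1UL << 3)

	struct fake_vcpu {
		unsigned long cr0;	/* CR0 as the guest believes it to be */
		unsigned long hw_cr0;	/* CR0 the hardware actually runs with */
		int fpu_active;
		int intercept_nm;	/* trap #NM (device-not-available)? */
	};

	/* Guest touched the FPU: stop intercepting #NM and, unless the
	 * guest itself wants TS set, clear it in the hardware CR0. */
	static void fpu_activate(struct fake_vcpu *v)
	{
		if (v->fpu_active)
			return;
		v->fpu_active = 1;
		v->intercept_nm = 0;
		v->hw_cr0 &= ~X86_CR0_TS;
		if (v->cr0 & X86_CR0_TS)
			v->hw_cr0 |= X86_CR0_TS;
	}

	/* Take the FPU back: set TS and re-arm the intercept so the
	 * guest's next FPU use traps to the host. */
	static void fpu_deactivate(struct fake_vcpu *v)
	{
		if (!v->fpu_active)
			return;
		v->fpu_active = 0;
		v->intercept_nm = 1;
		v->hw_cr0 |= X86_CR0_TS;
	}
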
@@ -1002,17 +1002,17 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	vmx_fpu_deactivate(vcpu);
 
-	if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
+	if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
 		enter_pmode(vcpu);
 
-	if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
+	if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
 		enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & EFER_LME) {
-		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
+		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
 			enter_lmode(vcpu);
-		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK))
+		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
 			exit_lmode(vcpu);
 	}
 #endif
@@ -1022,14 +1022,14 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
 	vcpu->cr0 = cr0;
 
-	if (!(cr0 & CR0_TS_MASK) || !(cr0 & CR0_PE_MASK))
+	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
 		vmx_fpu_activate(vcpu);
 }
 
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	vmcs_writel(GUEST_CR3, cr3);
-	if (vcpu->cr0 & CR0_PE_MASK)
+	if (vcpu->cr0 & X86_CR0_PE)
 		vmx_fpu_deactivate(vcpu);
 }
 
@@ -1778,7 +1778,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	case 2: /* clts */
 		vcpu_load_rsp_rip(vcpu);
 		vmx_fpu_deactivate(vcpu);
-		vcpu->cr0 &= ~CR0_TS_MASK;
+		vcpu->cr0 &= ~X86_CR0_TS;
 		vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
 		vmx_fpu_activate(vcpu);
 		skip_emulated_instruction(vcpu);
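
The clts case is the endpoint of the same lazy-FPU path: CLTS architecturally clears only CR0.TS, so the handler updates the guest-visible read shadow and then lets vmx_fpu_activate() decide whether the hardware TS bit can really be dropped. In miniature, with an illustrative function name:

	#define X86_CR0_TS (1UL << 3)

	/* Emulate CLTS against a software copy of the guest's CR0; the
	 * caller then re-runs the lazy-FPU activation step, as the
	 * handle_cr() hunk above does. */
	static unsigned long emulate_clts_cr0(unsigned long guest_cr0)
	{
		return guest_cr0 & ~X86_CR0_TS;
	}
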