author		Rusty Russell <rusty@rustcorp.com.au>	2007-07-17 09:19:08 -0400
committer	Avi Kivity <avi@qumranet.com>	2007-10-13 04:18:18 -0400
commit		707d92fa72b425bc919a84670c01402e81505c58 (patch)
tree		9e3a55293e867e499fd625ebac45db565ff0fc38 /drivers/kvm/svm.c
parent		9a2b85c620b9779360c7726de4caeda78cac38d4 (diff)
KVM: Trivial: Use standard CR0 flags macros from asm/cpu-features.h
The kernel now has asm/cpu-features.h: use those macros instead of
inventing our own.
Also spell out definition of CR0_RESEVED_BITS (no code change) and fix typo.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
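
The CR0_*_MASK macros removed below were KVM-local definitions. The
following is a hedged reconstruction (not quoted from the patch) of how
they line up with the standard macros, using the architectural CR0 bit
positions, which is why the replacements are drop-in:

    /* Assumed shape of the removed KVM-local masks and their
     * standard replacements (architectural CR0 bit positions): */
    #define CR0_PE_MASK (1ULL << 0)   /* -> X86_CR0_PE, 0x00000001 */
    #define CR0_TS_MASK (1ULL << 3)   /* -> X86_CR0_TS, 0x00000008 */
    #define CR0_WP_MASK (1ULL << 16)  /* -> X86_CR0_WP, 0x00010000 */
    #define CR0_NW_MASK (1ULL << 29)  /* -> X86_CR0_NW, 0x20000000 */
    #define CR0_CD_MASK (1ULL << 30)  /* -> X86_CR0_CD, 0x40000000 */
    #define CR0_PG_MASK (1ULL << 31)  /* -> X86_CR0_PG, 0x80000000 */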
Diffstat (limited to 'drivers/kvm/svm.c')
 drivers/kvm/svm.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 52a11ccdf0c2..e920c2269af0 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -99,7 +99,7 @@ static unsigned get_addr_size(struct kvm_vcpu *vcpu)
 	struct vmcb_save_area *sa = &vcpu->svm->vmcb->save;
 	u16 cs_attrib;
 
-	if (!(sa->cr0 & CR0_PE_MASK) || (sa->rflags & X86_EFLAGS_VM))
+	if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
 		return 2;
 
 	cs_attrib = sa->cs.attrib;
@@ -563,7 +563,7 @@ static void init_vmcb(struct vmcb *vmcb)
 	 * cr0 val on cpu init should be 0x60000010, we enable cpu
 	 * cache by default. the orderly way is to enable cache in bios.
 	 */
-	save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
+	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
 	save->cr4 = CR4_PAE_MASK;
 	/* rdx = ?? */
 }
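
The 0x60000010 reset value cited in the comment above decomposes into
standard CR0 bits; a quick worked check (assuming X86_CR0_ET is defined
alongside the other flags):

    /* CR0 value after CPU reset, per the x86 manuals:
     *   X86_CR0_CD (0x40000000) cache disable
     * | X86_CR0_NW (0x20000000) not write-through
     * | X86_CR0_ET (0x00000010) extension type, hardwired to 1
     * = 0x60000010
     * init_vmcb() keeps only the ET bit (0x00000010) and leaves
     * CD/NW clear, i.e. it enables the cache by default. */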
@@ -756,25 +756,25 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 #ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & KVM_EFER_LME) {
-		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
+		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 			vcpu->shadow_efer |= KVM_EFER_LMA;
 			vcpu->svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
 		}
 
-		if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK) ) {
+		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG) ) {
 			vcpu->shadow_efer &= ~KVM_EFER_LMA;
 			vcpu->svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
 		}
 	}
 #endif
-	if ((vcpu->cr0 & CR0_TS_MASK) && !(cr0 & CR0_TS_MASK)) {
+	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
 		vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
 		vcpu->fpu_active = 1;
 	}
 
 	vcpu->cr0 = cr0;
-	cr0 |= CR0_PG_MASK | CR0_WP_MASK;
-	cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK);
+	cr0 |= X86_CR0_PG | X86_CR0_WP;
+	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	vcpu->svm->vmcb->save.cr0 = cr0;
 }
 
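The shadowing rule svm_set_cr0() applies above can be restated as a
minimal sketch (an illustration, not the patch itself; the X86_CR0_*
macros come from the header named in the commit message):

    /* Guest-visible cr0 is kept in vcpu->cr0; the hardware copy
     * always runs with paging and write-protect forced on (shadow
     * paging needs them) and with caching enabled. */
    static unsigned long shadow_cr0(unsigned long guest_cr0)
    {
    	unsigned long hw_cr0 = guest_cr0;

    	hw_cr0 |= X86_CR0_PG | X86_CR0_WP;	/* force paging + WP */
    	hw_cr0 &= ~(X86_CR0_CD | X86_CR0_NW);	/* keep caches on */
    	return hw_cr0;
    }
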
@@ -945,8 +945,8 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(vcpu->cr0 & CR0_TS_MASK))
-		vcpu->svm->vmcb->save.cr0 &= ~CR0_TS_MASK;
+	if (!(vcpu->cr0 & X86_CR0_TS))
+		vcpu->svm->vmcb->save.cr0 &= ~X86_CR0_TS;
 	vcpu->fpu_active = 1;
 
 	return 1;
@@ -1702,7 +1702,7 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 
 	if (vcpu->fpu_active) {
 		vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
-		vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK;
+		vcpu->svm->vmcb->save.cr0 |= X86_CR0_TS;
 		vcpu->fpu_active = 0;
 	}
 }
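
Read together, the X86_CR0_TS hunks above implement lazy FPU switching;
a condensed restatement (an interpretation of the code shown, not part
of the patch):

    /* 1. svm_set_cr3(): if the FPU was active, set X86_CR0_TS in the
     *    hardware cr0 and arm the #NM intercept, deactivating the FPU.
     * 2. nm_interception(): when the guest next touches the FPU, the
     *    #NM intercept fires; disarm it, clear TS in the hardware cr0
     *    unless the guest's own cr0 has TS set, and mark fpu_active.
     * 3. svm_set_cr0(): a guest CR0 write that clears TS likewise
     *    disarms the intercept and marks fpu_active. */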