author    Anthony Liguori <aliguori@us.ibm.com>  2007-04-27 02:29:21 -0400
committer Avi Kivity <avi@qumranet.com>          2007-05-03 03:52:31 -0400
commit    25c4c2762e31a75403eca0dd59f2cab85e3a2532
tree      151c4a309da81608bdf729d701dcbfc3d4e1e9d7
parent    e0e5127d06957e76da3906b7a58d5d2665e81f59
KVM: VMX: Properly shadow the CR0 register in the vcpu struct

Set all of the host mask bits for CR0 so that we can maintain a proper
shadow of CR0.  This exposes CR0.TS, paving the way for lazy fpu handling.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--  drivers/kvm/kvm.h      |  2
-rw-r--r--  drivers/kvm/kvm_main.c |  8
-rw-r--r--  drivers/kvm/svm.c      |  4
-rw-r--r--  drivers/kvm/vmx.c      | 14
4 files changed, 14 insertions(+), 14 deletions(-)
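The key change is writing ~0UL into CR0_GUEST_HOST_MASK: every CR0 bit becomes host owned, so guest reads of CR0 are served from the read shadow and guest writes to CR0 always trap, which is why vcpu->cr0 can stay authoritative and the CR0 half of the decache path can be dropped. Below is a minimal standalone sketch of how the guest-visible CR0 value is composed under a partial versus a full host mask; the helper name, the local TS bit definition and the sample values are invented for illustration and are not KVM symbols.

#include <stdio.h>

/* Local stand-in for the CR0.TS bit (bit 3 on x86); not taken from kvm.h. */
#define SKETCH_CR0_TS (1UL << 3)

/*
 * Bits set in the host mask are "host owned": the guest sees the shadow
 * value for them and writes to them trap.  Bits clear in the mask are
 * read directly from the hardware register.
 */
static unsigned long guest_visible_cr0(unsigned long hw_cr0,
                                       unsigned long shadow_cr0,
                                       unsigned long host_mask)
{
        return (shadow_cr0 & host_mask) | (hw_cr0 & ~host_mask);
}

int main(void)
{
        unsigned long hw = 0x80000031UL;            /* CR0 the CPU holds: TS clear */
        unsigned long shadow = hw | SKETCH_CR0_TS;  /* CR0 the shadow claims: TS set */

        /* Partial mask (TS guest owned): the guest sees hardware TS, so the
         * software copy can go stale and must be re-read ("decached"). */
        printf("partial mask: %#lx\n",
               guest_visible_cr0(hw, shadow, ~SKETCH_CR0_TS));

        /* Full mask (~0UL), as in this patch: the shadow is what the guest
         * sees, and every write traps, so the copy stays exact. */
        printf("full mask:    %#lx\n",
               guest_visible_cr0(hw, shadow, ~0UL));
        return 0;
}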
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 61ff085df7e6..f99e89e185b2 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -397,7 +397,7 @@ struct kvm_arch_ops {
 	void (*set_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
-	void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu);
+	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 67554034d001..cdf0b176851d 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -510,7 +510,6 @@ EXPORT_SYMBOL_GPL(set_cr0);
 
 void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
 	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(lmsw);
@@ -1117,7 +1116,6 @@ int emulate_clts(struct kvm_vcpu *vcpu)
 {
 	unsigned long cr0;
 
-	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
 	cr0 = vcpu->cr0 & ~CR0_TS_MASK;
 	kvm_arch_ops->set_cr0(vcpu, cr0);
 	return X86EMUL_CONTINUE;
@@ -1318,7 +1316,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
 
 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 {
-	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
+	kvm_arch_ops->decache_cr4_guest_bits(vcpu);
 	switch (cr) {
 	case 0:
 		return vcpu->cr0;
@@ -1934,7 +1932,7 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	sregs->gdt.limit = dt.limit;
 	sregs->gdt.base = dt.base;
 
-	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
+	kvm_arch_ops->decache_cr4_guest_bits(vcpu);
 	sregs->cr0 = vcpu->cr0;
 	sregs->cr2 = vcpu->cr2;
 	sregs->cr3 = vcpu->cr3;
@@ -1985,7 +1983,7 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 #endif
 	vcpu->apic_base = sregs->apic_base;
 
-	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
+	kvm_arch_ops->decache_cr4_guest_bits(vcpu);
 
 	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
 	kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 2a7a0390bfb1..bddd0238869d 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -738,7 +738,7 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 	vcpu->svm->vmcb->save.gdtr.base = dt->base ;
 }
 
-static void svm_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
+static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
@@ -1759,7 +1759,7 @@ static struct kvm_arch_ops svm_arch_ops = {
 	.get_segment = svm_get_segment,
 	.set_segment = svm_set_segment,
 	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
-	.decache_cr0_cr4_guest_bits = svm_decache_cr0_cr4_guest_bits,
+	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
 	.set_cr0 = svm_set_cr0,
 	.set_cr3 = svm_set_cr3,
 	.set_cr4 = svm_set_cr4,
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index d28c848138ce..09608114e29a 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -810,11 +810,8 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 #endif
 
-static void vmx_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
+static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
-	vcpu->cr0 &= KVM_GUEST_CR0_MASK;
-	vcpu->cr0 |= vmcs_readl(GUEST_CR0) & ~KVM_GUEST_CR0_MASK;
-
 	vcpu->cr4 &= KVM_GUEST_CR4_MASK;
 	vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
 }
@@ -1205,7 +1202,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	vmcs_writel(TPR_THRESHOLD, 0);
 #endif
 
-	vmcs_writel(CR0_GUEST_HOST_MASK, KVM_GUEST_CR0_MASK);
+	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
 	vcpu->cr0 = 0x60000010;
@@ -1557,6 +1554,11 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			return 1;
 		};
 		break;
+	case 2: /* clts */
+		vcpu_load_rsp_rip(vcpu);
+		set_cr0(vcpu, vcpu->cr0 & ~CR0_TS_MASK);
+		skip_emulated_instruction(vcpu);
+		return 1;
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
@@ -2112,7 +2114,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
 	.get_segment = vmx_get_segment,
 	.set_segment = vmx_set_segment,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
-	.decache_cr0_cr4_guest_bits = vmx_decache_cr0_cr4_guest_bits,
+	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr3 = vmx_set_cr3,
 	.set_cr4 = vmx_set_cr4,
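
On the new clts arm in handle_cr: a VMX control-register exit reports the CR number in bits 3:0 of the exit qualification and the access type in bits 5:4 (0 mov-to, 1 mov-from, 2 clts, 3 lmsw), so with CR0.TS now host owned a guest clts traps into its own case of the outer switch, where the handler clears TS in the authoritative shadow and skips the instruction. A small self-contained decoder in that shape is sketched below; the enum and function names are local to the sketch, not KVM code.

#include <stdio.h>

enum cr_access { CR_MOV_TO = 0, CR_MOV_FROM = 1, CR_CLTS = 2, CR_LMSW = 3 };

/* Decode the CR number and access type fields of a CR-access exit
 * qualification, mirroring the switch structure used by handle_cr. */
static void decode_cr_exit(unsigned long exit_qualification)
{
        int cr = exit_qualification & 15;
        int type = (exit_qualification >> 4) & 3;

        switch (type) {
        case CR_MOV_TO:
                printf("mov to cr%d\n", cr);
                break;
        case CR_MOV_FROM:
                printf("mov from cr%d\n", cr);
                break;
        case CR_CLTS:
                /* clts: clear CR0.TS in the shadowed copy, then resume */
                printf("clts (clear CR0.TS)\n");
                break;
        case CR_LMSW:
                printf("lmsw\n");
                break;
        }
}

int main(void)
{
        decode_cr_exit(2UL << 4);  /* access type 2: clts */
        decode_cr_exit(0x3);       /* access type 0: mov to cr3 */
        return 0;
}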