about summary refs log tree commit diff stats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
author: Avi Kivity <avi@redhat.com> 2009-12-29 11:07:30 -0500
committer: Marcelo Tosatti <mtosatti@redhat.com> 2010-03-01 10:35:50 -0500
commit4d4ec0874583b127caac1d0f84033c8971b2fd2a (patch)
tree2ed5d1edff6a5253561fff0593e89d1c49518b1b /arch/x86/kvm/x86.c
parenta1f83a74feaa9718a5c61587256ea6cc1b993d16 (diff)
KVM: Replace read accesses of vcpu->arch.cr0 by an accessor
Since we'd like to allow the guest to own a few bits of cr0 at times, we need to know when we access those bits.

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- arch/x86/kvm/x86.c | 20 +++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c990424d86d0..748b15d8e46d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -430,7 +430,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
430{ 430{
431 if (cr0 & CR0_RESERVED_BITS) { 431 if (cr0 & CR0_RESERVED_BITS) {
432 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", 432 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
433 cr0, vcpu->arch.cr0); 433 cr0, kvm_read_cr0(vcpu));
434 kvm_inject_gp(vcpu, 0); 434 kvm_inject_gp(vcpu, 0);
435 return; 435 return;
436 } 436 }
@@ -488,7 +488,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
488 488
489void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) 489void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
490{ 490{
491 kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)); 491 kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
492} 492}
493EXPORT_SYMBOL_GPL(kvm_lmsw); 493EXPORT_SYMBOL_GPL(kvm_lmsw);
494 494
@@ -3095,7 +3095,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3095 3095
3096int emulate_clts(struct kvm_vcpu *vcpu) 3096int emulate_clts(struct kvm_vcpu *vcpu)
3097{ 3097{
3098 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS); 3098 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
3099 return X86EMUL_CONTINUE; 3099 return X86EMUL_CONTINUE;
3100} 3100}
3101 3101
@@ -3714,7 +3714,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
3714 3714
3715 switch (cr) { 3715 switch (cr) {
3716 case 0: 3716 case 0:
3717 value = vcpu->arch.cr0; 3717 value = kvm_read_cr0(vcpu);
3718 break; 3718 break;
3719 case 2: 3719 case 2:
3720 value = vcpu->arch.cr2; 3720 value = vcpu->arch.cr2;
@@ -3741,7 +3741,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
3741{ 3741{
3742 switch (cr) { 3742 switch (cr) {
3743 case 0: 3743 case 0:
3744 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); 3744 kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
3745 *rflags = kvm_get_rflags(vcpu); 3745 *rflags = kvm_get_rflags(vcpu);
3746 break; 3746 break;
3747 case 2: 3747 case 2:
@@ -4335,7 +4335,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4335 sregs->gdt.limit = dt.limit; 4335 sregs->gdt.limit = dt.limit;
4336 sregs->gdt.base = dt.base; 4336 sregs->gdt.base = dt.base;
4337 4337
4338 sregs->cr0 = vcpu->arch.cr0; 4338 sregs->cr0 = kvm_read_cr0(vcpu);
4339 sregs->cr2 = vcpu->arch.cr2; 4339 sregs->cr2 = vcpu->arch.cr2;
4340 sregs->cr3 = vcpu->arch.cr3; 4340 sregs->cr3 = vcpu->arch.cr3;
4341 sregs->cr4 = kvm_read_cr4(vcpu); 4341 sregs->cr4 = kvm_read_cr4(vcpu);
@@ -4521,7 +4521,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
4521{ 4521{
4522 struct kvm_segment kvm_seg; 4522 struct kvm_segment kvm_seg;
4523 4523
4524 if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE)) 4524 if (is_vm86_segment(vcpu, seg) || !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
4525 return kvm_load_realmode_segment(vcpu, selector, seg); 4525 return kvm_load_realmode_segment(vcpu, selector, seg);
4526 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg)) 4526 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
4527 return 1; 4527 return 1;
@@ -4799,7 +4799,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
4799 &nseg_desc); 4799 &nseg_desc);
4800 } 4800 }
4801 4801
4802 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS); 4802 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
4803 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg); 4803 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
4804 tr_seg.type = 11; 4804 tr_seg.type = 11;
4805 kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR); 4805 kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
@@ -4834,7 +4834,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4834 kvm_x86_ops->set_efer(vcpu, sregs->efer); 4834 kvm_x86_ops->set_efer(vcpu, sregs->efer);
4835 kvm_set_apic_base(vcpu, sregs->apic_base); 4835 kvm_set_apic_base(vcpu, sregs->apic_base);
4836 4836
4837 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0; 4837 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
4838 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); 4838 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
4839 vcpu->arch.cr0 = sregs->cr0; 4839 vcpu->arch.cr0 = sregs->cr0;
4840 4840
@@ -4873,7 +4873,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4873 /* Older userspace won't unhalt the vcpu on reset. */ 4873 /* Older userspace won't unhalt the vcpu on reset. */
4874 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && 4874 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
4875 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 4875 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
4876 !(vcpu->arch.cr0 & X86_CR0_PE)) 4876 !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
4877 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 4877 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4878 4878
4879 vcpu_put(vcpu); 4879 vcpu_put(vcpu);