diff options
author | Sheng Yang <sheng@linux.intel.com> | 2010-05-12 04:40:42 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-08-01 03:35:45 -0400 |
commit | aad827034e419fa8c5ec39e6455266f0b942d856 (patch) | |
tree | c49cbdc633b0146c9af9fdf5036dfdb09a008d97 | |
parent | 62ad07551a2ace89e35604d1c55fdae1dd3359a8 (diff) |
KVM: VMX: Only reset MMU when necessary
Only modifying certain bits of CR0/CR4 requires a paging mode switch.
Modifying the EFER.NXE bit would result in reserved bit updates.
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r-- | arch/x86/kvm/x86.c | 16 |
1 files changed, 14 insertions, 2 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 03039fd86980..78147f0421a0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -416,6 +416,10 @@ out: | |||
416 | 416 | ||
417 | static int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | 417 | static int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
418 | { | 418 | { |
419 | unsigned long old_cr0 = kvm_read_cr0(vcpu); | ||
420 | unsigned long update_bits = X86_CR0_PG | X86_CR0_WP | | ||
421 | X86_CR0_CD | X86_CR0_NW; | ||
422 | |||
419 | cr0 |= X86_CR0_ET; | 423 | cr0 |= X86_CR0_ET; |
420 | 424 | ||
421 | #ifdef CONFIG_X86_64 | 425 | #ifdef CONFIG_X86_64 |
@@ -449,7 +453,8 @@ static int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
449 | 453 | ||
450 | kvm_x86_ops->set_cr0(vcpu, cr0); | 454 | kvm_x86_ops->set_cr0(vcpu, cr0); |
451 | 455 | ||
452 | kvm_mmu_reset_context(vcpu); | 456 | if ((cr0 ^ old_cr0) & update_bits) |
457 | kvm_mmu_reset_context(vcpu); | ||
453 | return 0; | 458 | return 0; |
454 | } | 459 | } |
455 | 460 | ||
@@ -487,7 +492,8 @@ int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
487 | 492 | ||
488 | kvm_x86_ops->set_cr4(vcpu, cr4); | 493 | kvm_x86_ops->set_cr4(vcpu, cr4); |
489 | 494 | ||
490 | kvm_mmu_reset_context(vcpu); | 495 | if ((cr4 ^ old_cr4) & pdptr_bits) |
496 | kvm_mmu_reset_context(vcpu); | ||
491 | 497 | ||
492 | return 0; | 498 | return 0; |
493 | } | 499 | } |
@@ -693,6 +699,8 @@ static u32 emulated_msrs[] = { | |||
693 | 699 | ||
694 | static int set_efer(struct kvm_vcpu *vcpu, u64 efer) | 700 | static int set_efer(struct kvm_vcpu *vcpu, u64 efer) |
695 | { | 701 | { |
702 | u64 old_efer = vcpu->arch.efer; | ||
703 | |||
696 | if (efer & efer_reserved_bits) | 704 | if (efer & efer_reserved_bits) |
697 | return 1; | 705 | return 1; |
698 | 706 | ||
@@ -724,6 +732,10 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
724 | vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; | 732 | vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; |
725 | kvm_mmu_reset_context(vcpu); | 733 | kvm_mmu_reset_context(vcpu); |
726 | 734 | ||
735 | /* Update reserved bits */ | ||
736 | if ((efer ^ old_efer) & EFER_NX) | ||
737 | kvm_mmu_reset_context(vcpu); | ||
738 | |||
727 | return 0; | 739 | return 0; |
728 | } | 740 | } |
729 | 741 | ||