author     Gleb Natapov <gleb@redhat.com>         2013-01-30 09:45:02 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>  2013-02-04 20:24:28 -0500
commit     2c9afa52ef081334925905d6370d36b6602c328c (patch)
tree       7d79e7a1bc814f52f7daeef14c654ad67d7ca01f /arch/x86/kvm
parent     9bb4f6b15ec038ab9afcf346aa6a590406ad6c17 (diff)
KVM: MMU: set base_role.nxe during mmu initialization.
Move the base_role.nxe initialization to where all the other role bits are initialized.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
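
For context, the new initialization site derives the bit from the guest's EFER via is_nx(). A minimal sketch of that helper is given below; its exact location and spelling in this tree are assumptions (it is not part of this patch), but it is expected to simply report whether EFER.NX is enabled for the vCPU:

/* Assumed helper, sketched for context only (not part of this patch):
 * reports whether the guest has EFER.NX enabled. */
static inline int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}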
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c | 1 +
-rw-r--r--  arch/x86/kvm/x86.c | 2 --
2 files changed, 1 insertion(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 40737b38da19..8028ac65db18 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3687,6 +3687,7 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 	else
 		r = paging32_init_context(vcpu, context);
 
+	vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
 	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
 	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
 	vcpu->arch.mmu.base_role.smep_andnot_wp
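
With this hunk, base_role.nxe is filled in alongside the other role bits each time the shadow MMU context is rebuilt, rather than on every EFER write. Read from the patched side of the diff, the block is expected to end up roughly as follows (the role assignments that continue past the hunk are elided):

	/* In kvm_init_shadow_mmu(), after this patch (sketch from the hunk above): */
	vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
	/* ... smep_andnot_wp and the remaining role bits follow ... */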
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cf512e70c797..373e17a0d398 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -870,8 +870,6 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 	kvm_x86_ops->set_efer(vcpu, efer);
 
-	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
-
 	/* Update reserved bits */
 	if ((efer ^ old_efer) & EFER_NX)
 		kvm_mmu_reset_context(vcpu);
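
The removal above appears safe because the reset path that survives the patch regenerates the role: whenever EFER.NX toggles, set_efer() still calls kvm_mmu_reset_context(), which re-initializes the MMU context, and for the shadow MMU that path is expected to reach kvm_init_shadow_mmu(), where the first hunk now sets base_role.nxe from is_nx(). The dropped !tdp_enabled guard is presumably redundant there, since kvm_init_shadow_mmu() only runs for the non-TDP case. A sketch of the resulting tail of set_efer():

	/* Tail of set_efer() after this patch (sketch; surrounding code unchanged): */
	kvm_x86_ops->set_efer(vcpu, efer);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);	/* rebuilds base_role, including nxe */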