path: root/arch/x86
author    Avi Kivity <avi@redhat.com>    2011-06-06 09:11:54 -0400
committer Avi Kivity <avi@redhat.com>    2011-07-12 06:16:26 -0400
commit    411c588dfb863feee78b721d5e7c86ac38921c49 (patch)
tree      bb60c136f0392fbbd8f222fd2049f265746a7eae /arch/x86
parent    a01c8f9b4e266df1d7166d23216f2060648f862d (diff)
KVM: MMU: Adjust shadow paging to work when SMEP=1 and CR0.WP=0
When CR0.WP=0, we sometimes map user pages as kernel pages (to allow the
kernel to write to them).  Unfortunately this also allows the kernel to fetch
from these pages, even if CR4.SMEP is set.  Adjust for this by also setting
NX on the spte in these circumstances.

Signed-off-by: Avi Kivity <avi@redhat.com>
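In short: the spte for such a converted page must lose execute permission
whenever the guest has CR4.SMEP enabled, otherwise the guest kernel could
still fetch from what the guest regards as a user page.  A minimal sketch of
that decision, assuming simplified bit masks and a hypothetical helper name
(spte_adjust_for_smep) that is not part of the patch:

#include <stdbool.h>
#include <stdint.h>

#define PT_USER_MASK	(1ULL << 2)	/* user/supervisor bit of a 64-bit PTE */
#define PT64_NX_MASK	(1ULL << 63)	/* no-execute bit of a 64-bit PTE */

/*
 * Illustrative helper, not part of the patch: adjust a shadow PTE for a
 * guest page whose guest PTE does not grant write access, when shadow
 * paging must still make it writable because the guest runs with CR0.WP=0.
 */
static uint64_t spte_adjust_for_smep(uint64_t spte, bool guest_can_write,
				     bool guest_smep)
{
	if (!guest_can_write) {
		/* Demote to a supervisor page so writes succeed under CR0.WP=0. */
		spte &= ~PT_USER_MASK;
		/*
		 * The demotion would also let the guest kernel fetch from the
		 * page, which SMEP is supposed to forbid, so set no-execute.
		 */
		if (guest_smep)
			spte |= PT64_NX_MASK;
	}
	return spte;
}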
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h    1
-rw-r--r--  arch/x86/kvm/mmu.c                14
2 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 554be456f11e..da6bbee878ca 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -205,6 +205,7 @@ union kvm_mmu_page_role {
 		unsigned invalid:1;
 		unsigned nxe:1;
 		unsigned cr0_wp:1;
+		unsigned smep_andnot_wp:1;
 	};
 };
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 15afa1e1eaf9..da0f3b081076 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1985,8 +1985,17 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= PT_WRITABLE_MASK;
 
 		if (!vcpu->arch.mmu.direct_map
-		    && !(pte_access & ACC_WRITE_MASK))
+		    && !(pte_access & ACC_WRITE_MASK)) {
 			spte &= ~PT_USER_MASK;
+			/*
+			 * If we converted a user page to a kernel page,
+			 * so that the kernel can write to it when cr0.wp=0,
+			 * then we should prevent the kernel from executing it
+			 * if SMEP is enabled.
+			 */
+			if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+				spte |= PT64_NX_MASK;
+		}
 
 		/*
 		 * Optimization: for pte sync, if spte was writable the hash
@@ -2955,6 +2964,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
 	int r;
+	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
@@ -2969,6 +2979,8 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 
 	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
 	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
+	vcpu->arch.mmu.base_role.smep_andnot_wp
+		= smep && !is_write_protection(vcpu);
 
 	return r;
 }
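
A note on the new role bit: shadow pages are cached and looked up by their
kvm_mmu_page_role, so smep_andnot_wp keeps shadow pages built with the extra
NX treatment (SMEP=1, CR0.WP=0) distinct from pages built without it when the
guest toggles those control bits.  A hedged sketch of the computation in the
last hunk, using a simplified stand-in struct (mmu_role_bits and
compute_role_bits are illustrative names, not kernel identifiers):

#include <stdbool.h>

/* Simplified stand-in for the relevant fields of union kvm_mmu_page_role. */
struct mmu_role_bits {
	unsigned cr0_wp:1;
	unsigned smep_andnot_wp:1;	/* bit introduced by this patch */
};

/*
 * Illustrative only: mirror the base_role assignments made in
 * kvm_init_shadow_mmu(), so that pages shadowed while SMEP=1 and CR0.WP=0
 * (and therefore carrying the extra NX bits) get a distinct role value.
 */
static struct mmu_role_bits compute_role_bits(bool cr0_wp, bool cr4_smep)
{
	struct mmu_role_bits role = {
		.cr0_wp		= cr0_wp,
		.smep_andnot_wp	= cr4_smep && !cr0_wp,
	};

	return role;
}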