Diffstat (limited to 'arch/x86/kvm/mmu.c')

 arch/x86/kvm/mmu.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d43867c33bc4..44a7d2515497 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 	}
 }
 
-void update_permission_bitmask(struct kvm_vcpu *vcpu,
-			       struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+				      struct kvm_mmu *mmu, bool ept)
 {
 	unsigned bit, byte, pfec;
 	u8 map;
@@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
 	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
 	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	MMU_WARN_ON(VALID_PAGE(context->root_hpa));
@@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	context->base_role.cr0_wp = is_write_protection(vcpu);
 	context->base_role.smep_andnot_wp
 		= smep && !is_write_protection(vcpu);
+	context->base_role.smap_andnot_wp
+		= smap && !is_write_protection(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
@@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			      const u8 *new, int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
-	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
 	bool remote_flush, local_flush, zap_page;
+	union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
+		.cr0_wp = 1,
+		.cr4_pae = 1,
+		.nxe = 1,
+		.smep_andnot_wp = 1,
+		.smap_andnot_wp = 1,
+	};
 
 	/*
 	 * If we don't have indirect shadow pages, it means no page is
@@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		    detect_write_flooding(sp)) {
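
Note: kvm_mmu_pte_write() uses this mask to decide whether a cached shadow page's role is still compatible with the vCPU's current base_role before reusing its sptes; a shadow page built under different SMEP/SMAP-versus-CR0.WP conditions must not be matched, which is why the two *_andnot_wp bits join the mask above alongside cr0_wp, cr4_pae, and nxe. The standalone sketch below illustrates that role-mask pattern; the union layout and the values in main() are simplified stand-ins chosen for illustration, not the kernel's real kvm_mmu_page_role.

/*
 * Minimal userspace sketch of the role-mask comparison pattern
 * (hypothetical layout; compile with -std=c11 or a GNU C compiler).
 */
#include <stdio.h>

union page_role {
	unsigned word;			/* whole-role view, compared in bulk */
	struct {			/* per-property view, set bit by bit */
		unsigned cr0_wp:1;
		unsigned cr4_pae:1;
		unsigned nxe:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
	};
};

int main(void)
{
	/*
	 * Compound literal, as in the patched kvm_mmu_pte_write():
	 * every role bit that must match is named once at initialization,
	 * instead of being assigned piecemeal later.
	 */
	union page_role mask = (union page_role) {
		.cr0_wp = 1,
		.cr4_pae = 1,
		.nxe = 1,
		.smep_andnot_wp = 1,
		.smap_andnot_wp = 1,
	};

	union page_role base = { .word = 0 };	/* current vCPU role */
	union page_role cached = { .word = 0 };	/* role of a cached page */

	base.smap_andnot_wp = 1;	/* illustrative: SMAP on, CR0.WP off now */
	cached.smap_andnot_wp = 0;	/* page was shadowed before that change */

	/*
	 * XOR exposes the bits that differ; AND with the mask keeps only
	 * the bits we care about. A non-zero result means the cached page
	 * was built under an incompatible role and cannot be reused.
	 */
	if ((cached.word ^ base.word) & mask.word)
		printf("role mismatch: stale shadow page\n");
	else
		printf("roles compatible\n");

	return 0;
}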