-rw-r--r--  arch/x86/include/asm/kvm_host.h |  7
-rw-r--r--  arch/x86/kvm/mmu.c              | 38
-rw-r--r--  arch/x86/kvm/mmu.h              | 19
-rw-r--r--  arch/x86/kvm/paging_tmpl.h      | 22
-rw-r--r--  arch/x86/kvm/x86.c              | 11
5 files changed, 61 insertions, 36 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 64adb6117e19..3318bde206a5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -287,6 +287,13 @@ struct kvm_mmu {
 	union kvm_mmu_page_role base_role;
 	bool direct_map;
 
+	/*
+	 * Bitmap; bit set = permission fault
+	 * Byte index: page fault error code [4:1]
+	 * Bit index: pte permissions in ACC_* format
+	 */
+	u8 permissions[16];
+
 	u64 *pae_root;
 	u64 *lm_root;
 	u64 rsvd_bits_mask[2][4];
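
The permissions bitmap replaces per-fault branch logic with a single table lookup: page-fault error code bits [4:1] (write, user, reserved, fetch) select one of the 16 bytes, and the pte's permissions in ACC_* encoding (exec = bit 0, write = bit 1, user = bit 2) select a bit within that byte. As a worked example: a user-mode write fault has pfec = PFERR_USER_MASK | PFERR_WRITE_MASK = 0b00110, indexing byte pfec >> 1 = 3; against a read-only user page (pte_access = ACC_EXEC_MASK | ACC_USER_MASK = 5), bit 5 of permissions[3] is set, signalling a permission fault.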
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f297a2ccf4f6..9c6188931f87 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3516,6 +3516,38 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	}
 }
 
+static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+{
+	unsigned bit, byte, pfec;
+	u8 map;
+	bool fault, x, w, u, wf, uf, ff, smep;
+
+	smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
+		pfec = byte << 1;
+		map = 0;
+		wf = pfec & PFERR_WRITE_MASK;
+		uf = pfec & PFERR_USER_MASK;
+		ff = pfec & PFERR_FETCH_MASK;
+		for (bit = 0; bit < 8; ++bit) {
+			x = bit & ACC_EXEC_MASK;
+			w = bit & ACC_WRITE_MASK;
+			u = bit & ACC_USER_MASK;
+
+			/* Not really needed: !nx will cause pte.nx to fault */
+			x |= !mmu->nx;
+			/* Allow supervisor writes if !cr0.wp */
+			w |= !is_write_protection(vcpu) && !uf;
+			/* Disallow supervisor fetches of user code if cr4.smep */
+			x &= !(smep && u && !uf);
+
+			fault = (ff && !x) || (uf && !u) || (wf && !w);
+			map |= fault << bit;
+		}
+		mmu->permissions[byte] = map;
+	}
+}
+
 static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 					struct kvm_mmu *context,
 					int level)
@@ -3524,6 +3556,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 	context->root_level = level;
 
 	reset_rsvds_bits_mask(vcpu, context);
+	update_permission_bitmask(vcpu, context);
 
 	ASSERT(is_pae(vcpu));
 	context->new_cr3 = paging_new_cr3;
@@ -3552,6 +3585,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
 	context->root_level = PT32_ROOT_LEVEL;
 
 	reset_rsvds_bits_mask(vcpu, context);
+	update_permission_bitmask(vcpu, context);
 
 	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging32_page_fault;
@@ -3612,6 +3646,8 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 		context->gva_to_gpa = paging32_gva_to_gpa;
 	}
 
+	update_permission_bitmask(vcpu, context);
+
 	return 0;
 }
 
@@ -3687,6 +3723,8 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
 	}
 
+	update_permission_bitmask(vcpu, g_context);
+
 	return 0;
 }
 
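Taken together, update_permission_bitmask() precomputes, for every combination of error-code bits [4:1] and every ACC_* permission set, whether the access faults, folding cr0.wp, cr4.smep and nx into the table so the hot path never re-reads them. The scheme can be exercised outside the kernel; below is a minimal user-space sketch of the same precompute-and-lookup idea (the PFERR_*/ACC_* constants mirror the kernel's values, while the nx/wp/smep parameters are stand-ins for the vcpu state reads):

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	#define PFERR_WRITE_MASK (1U << 1)
	#define PFERR_USER_MASK  (1U << 2)
	#define PFERR_FETCH_MASK (1U << 4)

	#define ACC_EXEC_MASK  1	/* executable */
	#define ACC_WRITE_MASK 2	/* writable */
	#define ACC_USER_MASK  4	/* user-accessible */

	static uint8_t permissions[16];

	/* Same triple test as the kernel function, minus the vcpu plumbing. */
	static void update_permission_bitmask(bool nx, bool wp, bool smep)
	{
		unsigned bit, byte, pfec;

		for (byte = 0; byte < 16; ++byte) {
			uint8_t map = 0;

			pfec = byte << 1;	/* error code bits [4:1] */
			bool wf = pfec & PFERR_WRITE_MASK;
			bool uf = pfec & PFERR_USER_MASK;
			bool ff = pfec & PFERR_FETCH_MASK;

			for (bit = 0; bit < 8; ++bit) {
				bool x = bit & ACC_EXEC_MASK;
				bool w = bit & ACC_WRITE_MASK;
				bool u = bit & ACC_USER_MASK;

				x |= !nx;			/* no NX: all ptes executable */
				w |= !wp && !uf;		/* !cr0.wp: supervisor may write RO */
				x &= !(smep && u && !uf);	/* smep: no kernel fetch of user code */

				bool fault = (ff && !x) || (uf && !u) || (wf && !w);
				map |= (uint8_t)(fault << bit);
			}
			permissions[byte] = map;
		}
	}

	static bool permission_fault(unsigned pte_access, unsigned pfec)
	{
		return (permissions[pfec >> 1] >> pte_access) & 1;
	}

	int main(void)
	{
		update_permission_bitmask(true, true, false);	/* nx=1, cr0.wp=1, smep=0 */

		/* user write to a supervisor-only pte: faults (prints 1) */
		printf("%d\n", permission_fault(ACC_EXEC_MASK,
						PFERR_USER_MASK | PFERR_WRITE_MASK));
		/* supervisor read of a writable kernel pte: no fault (prints 0) */
		printf("%d\n", permission_fault(ACC_EXEC_MASK | ACC_WRITE_MASK, 0));
		return 0;
	}
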
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 2832081e9b2e..584660775d08 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -89,17 +89,14 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
 	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
 }
 
-static inline bool check_write_user_access(struct kvm_vcpu *vcpu,
-					   bool write_fault, bool user_fault,
-					   unsigned long pte)
+/*
+ * Will a fault with a given page-fault error code (pfec) cause a permission
+ * fault with the given access (in ACC_* format)?
+ */
+static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
+				    unsigned pfec)
 {
-	if (unlikely(write_fault && !is_writable_pte(pte)
-	      && (user_fault || is_write_protection(vcpu))))
-		return false;
-
-	if (unlikely(user_fault && !(pte & PT_USER_MASK)))
-		return false;
-
-	return true;
+	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
 }
 
 #endif
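
With the bitmap in place, every permission decision collapses to one shift and mask, regardless of how many control-register conditions fed into it. A sketch of a call site (the values are made-up illustrations, not kernel state):

	/* user-mode write fault against a read-only user pte */
	unsigned pte_access = ACC_EXEC_MASK | ACC_USER_MASK;	/* no write bit */
	unsigned pfec = PFERR_USER_MASK | PFERR_WRITE_MASK;

	if (permission_fault(mmu, pte_access, pfec))
		/* ... report the permission fault ... */;

Note that pfec >> 1 selects the byte and pte_access the bit, so the lookup cost is independent of cr0.wp, cr4.smep and nx; those were folded in when update_permission_bitmask() ran.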
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 35a05dd2f69c..8f6c59fadbbe 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -169,7 +169,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	pt_element_t pte;
 	pt_element_t __user *uninitialized_var(ptep_user);
 	gfn_t table_gfn;
-	unsigned index, pt_access, uninitialized_var(pte_access);
+	unsigned index, pt_access, pte_access;
 	gpa_t pte_gpa;
 	bool eperm, last_gpte;
 	int offset;
@@ -237,24 +237,9 @@ retry_walk:
 			goto error;
 		}
 
-		if (!check_write_user_access(vcpu, write_fault, user_fault,
-					     pte))
-			eperm = true;
-
-#if PTTYPE == 64
-		if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
-			eperm = true;
-#endif
+		pte_access = pt_access & gpte_access(vcpu, pte);
 
 		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
-		if (last_gpte) {
-			pte_access = pt_access & gpte_access(vcpu, pte);
-			/* check if the kernel is fetching from user page */
-			if (unlikely(pte_access & PT_USER_MASK) &&
-			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
-				if (fetch_fault && !user_fault)
-					eperm = true;
-		}
 
 		walker->ptes[walker->level - 1] = pte;
 
@@ -284,10 +269,11 @@ retry_walk:
 			break;
 		}
 
-		pt_access &= gpte_access(vcpu, pte);
+		pt_access &= pte_access;
 		--walker->level;
 	}
 
+	eperm |= permission_fault(mmu, pte_access, access);
 	if (unlikely(eperm)) {
 		errcode |= PFERR_PRESENT_MASK;
 		goto error;
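
The walker change relies on ACC_* bits only ever being removed as the walk descends: pte_access is recomputed at each level as pt_access & gpte_access(vcpu, pte), so after the loop it holds the intersection of all levels and a single permission_fault() call suffices. A minimal standalone sketch of that accumulation, using the same ACC_* encoding as above (the per-level values here are invented):

	/* gpte_acc[] stands in for per-level gpte_access() results */
	unsigned gpte_acc[4] = { 7, 7, 6, 6 };	/* top level first */
	unsigned pt_access = 7, pte_access = 7;	/* start from ACC_ALL */
	int i;

	for (i = 0; i < 4; ++i) {
		pte_access = pt_access & gpte_acc[i];
		pt_access &= pte_access;
	}
	/* pte_access == 6 here: user and write survived, exec did not */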
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 19047eafa38d..497226e49d4b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3672,20 +3672,17 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 				gpa_t *gpa, struct x86_exception *exception,
 				bool write)
 {
-	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
+		| (write ? PFERR_WRITE_MASK : 0);
 
-	if (vcpu_match_mmio_gva(vcpu, gva) &&
-	    check_write_user_access(vcpu, write, access,
-				    vcpu->arch.access)) {
+	if (vcpu_match_mmio_gva(vcpu, gva)
+	    && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
 			(gva & (PAGE_SIZE - 1));
 		trace_vcpu_match_mmio(gva, *gpa, write, false);
 		return 1;
 	}
 
-	if (write)
-		access |= PFERR_WRITE_MASK;
-
 	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 
 	if (*gpa == UNMAPPED_GVA)