 arch/x86/kvm/mmu.c         | 12 ++++++++++++
 arch/x86/kvm/mmu.h         |  3 ++-
 arch/x86/kvm/paging_tmpl.h | 24 ++++++++++++------------
 3 files changed, 26 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index aa0b469ee07d..54c9cb4fdfa4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3408,6 +3408,18 @@ static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
 	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
 }
 
+static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
+{
+	unsigned mask;
+
+	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
+
+	mask = (unsigned)~ACC_WRITE_MASK;
+	/* Allow write access to dirty gptes */
+	mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
+	*access &= mask;
+}
+
 static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
 			   int *nr_present)
 {
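Note on the new helper: rather than branching on the dirty bit, protect_clean_gpte() shifts it from its PTE position (bit 6) down into the writable position (bit 1) and folds it into a mask; the BUILD_BUG_ON guarantees that ACC_WRITE_MASK and PT_WRITABLE_MASK coincide, which the trick depends on. A minimal standalone sketch of the same computation, with the constants restated locally (mirrored from mmu.h; a model for reading, not kernel code):

	#include <assert.h>
	#include <stdio.h>

	#define PT_WRITABLE_SHIFT 1
	#define PT_DIRTY_SHIFT    6
	#define PT_WRITABLE_MASK  (1u << PT_WRITABLE_SHIFT)
	#define PT_DIRTY_MASK     (1u << PT_DIRTY_SHIFT)
	#define ACC_WRITE_MASK    PT_WRITABLE_MASK  /* enforced by the BUILD_BUG_ON */

	static void protect_clean_gpte(unsigned *access, unsigned gpte)
	{
		unsigned mask = ~ACC_WRITE_MASK;

		/* A dirty gpte contributes PT_WRITABLE_MASK back into the
		 * mask, so only clean gptes lose write permission. */
		mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
		*access &= mask;
	}

	int main(void)
	{
		unsigned clean = PT_WRITABLE_MASK;                 /* writable, not dirty */
		unsigned dirty = PT_WRITABLE_MASK | PT_DIRTY_MASK; /* writable and dirty  */
		unsigned a1 = ACC_WRITE_MASK, a2 = ACC_WRITE_MASK;

		protect_clean_gpte(&a1, clean);
		protect_clean_gpte(&a2, dirty);
		assert(!(a1 & ACC_WRITE_MASK));  /* clean gpte: write stripped */
		assert(a2 & ACC_WRITE_MASK);     /* dirty gpte: write kept     */
		printf("clean -> %#x, dirty -> %#x\n", a1, a2);
		return 0;
	}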
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index e374db9af021..2832081e9b2e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -18,7 +18,8 @@
 #define PT_PCD_MASK (1ULL << 4)
 #define PT_ACCESSED_SHIFT 5
 #define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
-#define PT_DIRTY_MASK (1ULL << 6)
+#define PT_DIRTY_SHIFT 6
+#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
 #define PT_PAGE_SIZE_MASK (1ULL << 7)
 #define PT_PAT_MASK (1ULL << 7)
 #define PT_GLOBAL_MASK (1ULL << 8)
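Splitting the definition into PT_DIRTY_SHIFT plus PT_DIRTY_MASK leaves the mask's value untouched; it only names the shift distance protect_clean_gpte() needs. A standalone check of the arithmetic (PT_WRITABLE_SHIFT is defined as 1 earlier in mmu.h and is restated here for the sketch):

	#define PT_WRITABLE_SHIFT 1
	#define PT_DIRTY_SHIFT    6
	#define PT_DIRTY_MASK     (1ULL << PT_DIRTY_SHIFT)

	/* The dirty bit sits five positions above the writable bit... */
	_Static_assert(PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT == 5, "shift distance");
	/* ...and PT_DIRTY_MASK still evaluates to the old literal. */
	_Static_assert(PT_DIRTY_MASK == (1ULL << 6), "mask value unchanged");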
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bf8c42bf50fe..bf7b4ffafab8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -101,14 +101,11 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return (ret != orig_pte);
 }
 
-static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte,
-				   bool last)
+static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
 {
 	unsigned access;
 
 	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
-	if (last && !is_dirty_gpte(gpte))
-		access &= ~ACC_WRITE_MASK;
 
 #if PTTYPE == 64
 	if (vcpu->arch.mmu.nx)
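With the bool last parameter gone, gpte_access() derives only the guest-controlled permission bits (write, user, exec) from the gpte; the policy for clean gptes moves out to the call sites. The later hunks instantiate the resulting two-step pattern, e.g. in FNAME(update_pte):

	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
	protect_clean_gpte(&pte_access, gpte);

Non-leaf gptes, which previously passed last == false, no longer pay for a check that never applied to them.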
@@ -222,8 +219,7 @@ retry_walk:
 
 		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
 		if (last_gpte) {
-			pte_access = pt_access &
-				     FNAME(gpte_access)(vcpu, pte, true);
+			pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
 			/* check if the kernel is fetching from user page */
 			if (unlikely(pte_access & PT_USER_MASK) &&
 			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
@@ -274,7 +270,7 @@ retry_walk:
 			break;
 		}
 
-		pt_access &= FNAME(gpte_access)(vcpu, pte, false);
+		pt_access &= FNAME(gpte_access)(vcpu, pte);
 		--walker->level;
 	}
 
@@ -283,7 +279,9 @@ retry_walk:
 		goto error;
 	}
 
-	if (write_fault && unlikely(!is_dirty_gpte(pte))) {
+	if (!write_fault)
+		protect_clean_gpte(&pte_access, pte);
+	else if (unlikely(!is_dirty_gpte(pte))) {
 		int ret;
 
 		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
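The rewritten condition keeps the dirty-bit protocol intact: on a write fault the walker is about to set the gpte's dirty bit itself (the traced cmpxchg path above), so pte_access may retain write permission; on any other fault a still-clean gpte must not be mapped writable, or a later guest write would skip the dirty-bit update. A worked trace under the constants from the mmu.c sketch, for a writable but clean gpte on a read fault:

	gpte        = 0x02                 /* PT_WRITABLE_MASK set, dirty clear */
	pte_access  = ... | ACC_WRITE_MASK /* as computed by gpte_access()     */
	write_fault = 0, so protect_clean_gpte(&pte_access, pte) runs:
	    mask = ~0x02 | ((0x02 >> 5) & 0x02) = ~0x02
	    pte_access &= mask             /* ACC_WRITE_MASK is stripped       */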
@@ -368,7 +366,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		return;
 
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
-	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
+	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+	protect_clean_gpte(&pte_access, gpte);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
 	if (mmu_invalid_pfn(pfn))
 		return;
@@ -441,8 +440,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 			continue;
 
-		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
-								  true);
+		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+		protect_clean_gpte(&pte_access, gpte);
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 					      pte_access & ACC_WRITE_MASK);
@@ -794,7 +793,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 	gfn = gpte_to_gfn(gpte);
 	pte_access = sp->role.access;
-	pte_access &= FNAME(gpte_access)(vcpu, gpte, true);
+	pte_access &= FNAME(gpte_access)(vcpu, gpte);
+	protect_clean_gpte(&pte_access, gpte);
 
 	if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
 		continue;
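FNAME(update_pte), FNAME(pte_prefetch) and FNAME(sync_page) now all repeat the same two calls back to back. If more call sites accumulated, a small wrapper could keep them in step; a sketch only (the helper name is invented here and is not part of the patch):

	/* Hypothetical helper, not in this patch: the combined leaf-gpte
	 * access computation the three call sites above share. */
	static unsigned FNAME(gpte_final_access)(struct kvm_vcpu *vcpu,
						 pt_element_t gpte,
						 unsigned role_access)
	{
		unsigned access = role_access & FNAME(gpte_access)(vcpu, gpte);

		protect_clean_gpte(&access, gpte);  /* clean gptes lose write */
		return access;
	}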