Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
 arch/x86/kvm/paging_tmpl.h | 46 ++++++++++++++++++++++++++++++++++++----------
 1 file changed, 36 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 81eab9a50e6a..89d66ca4d87c 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -170,7 +170,7 @@ walk:
 			goto access_error;
 
 #if PTTYPE == 64
-		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
+		if (fetch_fault && (pte & PT64_NX_MASK))
 			goto access_error;
 #endif
 
@@ -190,10 +190,10 @@ walk:
 
 		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
 		    ((walker->level == PT_DIRECTORY_LEVEL) &&
-		     (pte & PT_PAGE_SIZE_MASK) &&
+		     is_large_pte(pte) &&
 		     (PTTYPE == 64 || is_pse(vcpu))) ||
 		    ((walker->level == PT_PDPE_LEVEL) &&
-		     (pte & PT_PAGE_SIZE_MASK) &&
+		     is_large_pte(pte) &&
 		     is_long_mode(vcpu))) {
 			int lvl = walker->level;
 
@@ -258,11 +258,17 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	pt_element_t gpte;
 	unsigned pte_access;
 	pfn_t pfn;
+	u64 new_spte;
 
 	gpte = *(const pt_element_t *)pte;
 	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
-		if (!is_present_gpte(gpte))
-			__set_spte(spte, shadow_notrap_nonpresent_pte);
+		if (!is_present_gpte(gpte)) {
+			if (page->unsync)
+				new_spte = shadow_trap_nonpresent_pte;
+			else
+				new_spte = shadow_notrap_nonpresent_pte;
+			__set_spte(spte, new_spte);
+		}
 		return;
 	}
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
@@ -457,6 +463,7 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
+	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -467,9 +474,16 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		level = iterator.level;
 		sptep = iterator.sptep;
 
-		if (level == PT_PAGE_TABLE_LEVEL ||
-		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
-		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
+		if (is_last_spte(*sptep, level)) {
+			struct kvm_mmu_page *sp = page_header(__pa(sptep));
+			int offset, shift;
+
+			shift = PAGE_SHIFT -
+				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
+			offset = sp->role.quadrant << shift;
+
+			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -487,7 +501,17 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
+
+	atomic_inc(&vcpu->kvm->arch.invlpg_counter);
+
 	spin_unlock(&vcpu->kvm->mmu_lock);
+
+	if (pte_gpa == -1)
+		return;
+
+	if (mmu_topup_memory_caches(vcpu))
+		return;
+	kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
@@ -551,12 +575,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	int i, offset, nr_present;
 	bool reset_host_protection;
+	gpa_t first_pte_gpa;
 
 	offset = nr_present = 0;
 
 	if (PTTYPE == 32)
 		offset = sp->role.quadrant << PT64_LEVEL_BITS;
 
+	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 		unsigned pte_access;
 		pt_element_t gpte;
@@ -566,8 +593,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		if (!is_shadow_present_pte(sp->spt[i]))
 			continue;
 
-		pte_gpa = gfn_to_gpa(sp->gfn);
-		pte_gpa += (i+offset) * sizeof(pt_element_t);
+		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
 		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
 					  sizeof(pt_element_t)))
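
As an aside, the pte_gpa arithmetic introduced in the invlpg hunk (and reused via first_pte_gpa in sync_page) can be tried in isolation. The sketch below is not kernel code: guest_pte_gpa is a hypothetical helper, and the macro values assume PTTYPE == 32 (a non-PAE guest, whose page tables hold 1024 4-byte entries) shadowed by 64-bit sptes (512 8-byte entries per shadow page), so each shadow page covers one quadrant of a guest table.

/*
 * Standalone sketch mirroring the shift/offset expression from the
 * invlpg hunk above. Assumes PTTYPE == 32; guest_pte_gpa and the
 * demo values in main() are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PT_LEVEL_BITS	10	/* index bits per level, guest tables  */
#define PT64_LEVEL_BITS	9	/* index bits per level, shadow tables */

static uint64_t guest_pte_gpa(uint64_t gfn, unsigned quadrant, int level,
			      long spte_index, size_t pte_size)
{
	/* how far one quadrant reaches into the guest page, in bytes */
	int shift = PAGE_SHIFT - (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
	int offset = quadrant << shift;

	return (gfn << PAGE_SHIFT) + offset + spte_index * pte_size;
}

int main(void)
{
	/*
	 * A level-1 shadow page with quadrant 1 covers the second half
	 * of a 1024-entry guest table at gfn 0x1234; its spte 5 shadows
	 * the guest pte at 0x1234000 + 0x800 + 5 * 4 = 0x1234814.
	 */
	printf("pte_gpa = %#llx\n",
	       (unsigned long long)guest_pte_gpa(0x1234, 1, 1, 5, 4));
	return 0;
}

For PTTYPE == 64 the PT_LEVEL_BITS - PT64_LEVEL_BITS term is zero and role.quadrant is always 0, so the offset drops out and pte_gpa is simply the table's gfn plus the entry index scaled by sizeof(pt_element_t).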