author     Xiao Guangrong <guangrong.xiao@linux.intel.com>  2016-02-24 04:51:11 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>              2016-03-03 08:36:21 -0500
commit     3d0c27ad6ee465f174b09ee99fcaf189c57d567a (patch)
tree       e342d0a67b9b7fbc4e124aaa85f873b017a9edb8
parent     f29d4d7810d7fd61442371cd68957e1d37ed79bb (diff)
KVM: MMU: let page fault handler be aware of tracked pages
The page fault caused by write access on a write-tracked page cannot be fixed; it always needs to be emulated. page_fault_handle_page_track() is the fast path introduced here to skip holding mmu-lock and walking the shadow page table.

However, if the page table is not present, it is worth making the page table entry present and readonly, so that read access stays happy.

mmu_need_write_protect() needs to be adjusted so that the page does not become writable when the page table entry is made present or when shadow page table entries are synced/prefetched.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
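To summarize the flow described above, here is a minimal sketch (not part of the commit): a consumer marks a gfn write-tracked with kvm_slot_page_track_add_page(), and the fault handlers then consult kvm_page_track_is_active() before taking mmu-lock. The helpers protect_guest_page_table() and write_fault_needs_emulation() below are hypothetical names for illustration; only the two kvm_*page_track* calls come from this series.

/*
 * Illustrative sketch only -- not part of the commit. The helpers
 * protect_guest_page_table() and write_fault_needs_emulation() are
 * hypothetical; kvm_slot_page_track_add_page() and
 * kvm_page_track_is_active() are the interfaces touched by this series.
 */
static void protect_guest_page_table(struct kvm *kvm,
                                     struct kvm_memory_slot *slot, gfn_t gfn)
{
        /* Every future guest write to this gfn must now be emulated. */
        kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
}

static bool write_fault_needs_emulation(struct kvm_vcpu *vcpu,
                                        u32 error_code, gfn_t gfn)
{
        /*
         * Mirrors page_fault_handle_page_track(): a present write fault on
         * a tracked gfn cannot be fixed by the page fault handler, so the
         * caller returns 1 and the write is emulated, without taking
         * mmu-lock or walking the shadow page table.
         */
        return (error_code & PFERR_PRESENT_MASK) &&
               (error_code & PFERR_WRITE_MASK) &&
               kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE);
}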
-rw-r--r--  arch/x86/include/asm/kvm_page_track.h   2
-rw-r--r--  arch/x86/kvm/mmu.c                      44
-rw-r--r--  arch/x86/kvm/page_track.c               15
-rw-r--r--  arch/x86/kvm/paging_tmpl.h               3
4 files changed, 57 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index e363e3040ba4..5f16e2864e73 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -17,4 +17,6 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
 void kvm_slot_page_track_remove_page(struct kvm *kvm,
                                      struct kvm_memory_slot *slot, gfn_t gfn,
                                      enum kvm_page_track_mode mode);
+bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
+                              enum kvm_page_track_mode mode);
 #endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7184218acf78..dd8e3ca2d79b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -41,6 +41,7 @@
 #include <asm/cmpxchg.h>
 #include <asm/io.h>
 #include <asm/vmx.h>
+#include <asm/kvm_page_track.h>
 
 /*
  * When setting this variable to true it enables Two-Dimensional-Paging
@@ -2448,25 +2449,29 @@ static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
         }
 }
 
-static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
-                                  bool can_unsync)
+static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
+                                   bool can_unsync)
 {
         struct kvm_mmu_page *s;
         bool need_unsync = false;
 
+        if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+                return true;
+
         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
                 if (!can_unsync)
-                        return 1;
+                        return true;
 
                 if (s->role.level != PT_PAGE_TABLE_LEVEL)
-                        return 1;
+                        return true;
 
                 if (!s->unsync)
                         need_unsync = true;
         }
         if (need_unsync)
                 kvm_unsync_pages(vcpu, gfn);
-        return 0;
+
+        return false;
 }
 
 static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
@@ -3381,21 +3386,43 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 }
 EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
 
+static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
+                                         u32 error_code, gfn_t gfn)
+{
+        if (unlikely(error_code & PFERR_RSVD_MASK))
+                return false;
+
+        if (!(error_code & PFERR_PRESENT_MASK) ||
+              !(error_code & PFERR_WRITE_MASK))
+                return false;
+
+        /*
+         * guest is writing the page which is write tracked which can
+         * not be fixed by page fault handler.
+         */
+        if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+                return true;
+
+        return false;
+}
+
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                 u32 error_code, bool prefault)
 {
-        gfn_t gfn;
+        gfn_t gfn = gva >> PAGE_SHIFT;
         int r;
 
         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 
+        if (page_fault_handle_page_track(vcpu, error_code, gfn))
+                return 1;
+
         r = mmu_topup_memory_caches(vcpu);
         if (r)
                 return r;
 
         MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-        gfn = gva >> PAGE_SHIFT;
 
         return nonpaging_map(vcpu, gva & PAGE_MASK,
                              error_code, gfn, prefault);
@@ -3472,6 +3499,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 
         MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
+        if (page_fault_handle_page_track(vcpu, error_code, gfn))
+                return 1;
+
         r = mmu_topup_memory_caches(vcpu);
         if (r)
                 return r;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index cd76bc318968..f127f6d04fa1 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -135,3 +135,18 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
          */
         kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
+
+/*
+ * check if the corresponding access on the specified guest page is tracked.
+ */
+bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
+                              enum kvm_page_track_mode mode)
+{
+        struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+        int index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
+
+        if (WARN_ON(!page_track_mode_is_valid(mode)))
+                return false;
+
+        return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
+}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 05827ff7bd2e..52ae2d94cc9e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -728,6 +728,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
                 return 0;
         }
 
+        if (page_fault_handle_page_track(vcpu, error_code, walker.gfn))
+                return 1;
+
         vcpu->arch.write_fault_to_shadow_pgtable = false;
 
         is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,