Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++---------------------
1 file changed, 44 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6f85fe0bf958..01d7c2ad05f5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2382,12 +2382,20 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    || (!vcpu->arch.mmu.direct_map && write_fault
 			&& !is_write_protection(vcpu) && !user_fault)) {
 
+		/*
+		 * There are two cases:
+		 * - another vcpu creates a new sp in the window between
+		 *   mapping_level() and acquiring mmu-lock.
+		 * - the new sp is created by the vcpu itself (page-fault
+		 *   path) when the guest uses the target gfn as its own
+		 *   page table.
+		 * Both cases can be fixed by allowing the guest to retry
+		 * the access: it will refault, and we can then establish
+		 * the mapping with a small page.
+		 */
 		if (level > PT_PAGE_TABLE_LEVEL &&
-		    has_wrprotected_page(vcpu->kvm, gfn, level)) {
-			ret = 1;
-			drop_spte(vcpu->kvm, sptep);
+		    has_wrprotected_page(vcpu->kvm, gfn, level))
 			goto done;
-		}
 
 		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
 
@@ -2505,6 +2513,14 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 	mmu_free_roots(vcpu);
 }
 
+static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+	int bit7;
+
+	bit7 = (gpte >> 7) & 1;
+	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
+}
+
 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 				     bool no_dirty_log)
 {
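For reference, the helper moved here keys off bit 7 of the guest PTE (the PS bit in directory-level entries), which selects one of two rows of precomputed reserved-bit masks; level-1 then picks the column for the paging level being checked. A minimal user-space sketch of the same lookup, where the toy_ names and mask values are invented placeholders for what KVM precomputes per guest paging mode:

#include <stdint.h>
#include <stdio.h>

struct toy_mmu {
	uint64_t rsvd_bits_mask[2][4];	/* [bit 7 of gpte][level - 1] */
};

static int is_rsvd_bits_set(struct toy_mmu *mmu, uint64_t gpte, int level)
{
	int bit7 = (gpte >> 7) & 1;	/* PS bit picks the mask row */

	return (gpte & mmu->rsvd_bits_mask[bit7][level - 1]) != 0;
}

int main(void)
{
	struct toy_mmu mmu = {
		.rsvd_bits_mask = {
			/* Pretend MAXPHYADDR is 40: bits 51..40 of any
			 * entry are reserved in this toy setup. */
			[0][0] = 0x000fff0000000000ULL,
			[0][1] = 0x000fff0000000000ULL,
			/* With PS=1 at level 2 (2MB page), low bits
			 * 20..13 of the frame field are reserved too. */
			[1][1] = 0x000fff00001fe000ULL,
		},
	};
	/* Present leaf entry whose address sets reserved bit 40. */
	uint64_t gpte = 0x0000010000000001ULL;

	printf("rsvd set at level 1: %d\n", is_rsvd_bits_set(&mmu, gpte, 1));
	return 0;
}

Moving the helper up in the file (it is deleted from its old spot in the last hunk below) is what lets prefetch_invalid_gpte() in the next hunk call it without a forward declaration.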
@@ -2517,6 +2533,26 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return gfn_to_pfn_memslot_atomic(slot, gfn);
 }
 
+static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu_page *sp, u64 *spte,
+				  u64 gpte)
+{
+	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+		goto no_present;
+
+	if (!is_present_gpte(gpte))
+		goto no_present;
+
+	if (!(gpte & PT_ACCESSED_MASK))
+		goto no_present;
+
+	return false;
+
+no_present:
+	drop_spte(vcpu->kvm, spte);
+	return true;
+}
+
 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 				    struct kvm_mmu_page *sp,
 				    u64 *start, u64 *end)
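prefetch_invalid_gpte() folds three bail-out conditions into one predicate: reserved bits set, entry not present, or accessed bit clear (a gpte the guest has never used is not worth prefetching); in every bad case it drops the spte and tells the caller to skip the entry. A stand-alone sketch of the same check ordering, where the TOY_ masks are invented stand-ins for the real x86 PTE bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PT_PRESENT_MASK	(1ULL << 0)
#define TOY_PT_ACCESSED_MASK	(1ULL << 5)
/* Pretend everything above bit 51 is reserved. */
#define TOY_RSVD_MASK		(~((1ULL << 52) - 1))

/* Mirrors the shape of prefetch_invalid_gpte(): returns true when the
 * guest PTE must not be prefetched (the real code also drops the spte). */
static bool toy_invalid_gpte(uint64_t gpte)
{
	if (gpte & TOY_RSVD_MASK)		/* reserved bits set */
		return true;
	if (!(gpte & TOY_PT_PRESENT_MASK))	/* not present */
		return true;
	if (!(gpte & TOY_PT_ACCESSED_MASK))	/* never accessed: skip */
		return true;
	return false;
}

int main(void)
{
	uint64_t ok = TOY_PT_PRESENT_MASK | TOY_PT_ACCESSED_MASK | 0x1000;
	uint64_t stale = TOY_PT_PRESENT_MASK;	/* accessed bit clear */

	printf("%d %d\n", toy_invalid_gpte(ok), toy_invalid_gpte(stale));
	return 0;
}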
@@ -2671,7 +2707,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
 	 * here.
 	 */
-	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompound(pfn_to_page(pfn)) &&
 	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
@@ -2699,18 +2735,13 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	}
 }
 
-static bool mmu_invalid_pfn(pfn_t pfn)
-{
-	return unlikely(is_invalid_pfn(pfn));
-}
-
 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 				pfn_t pfn, unsigned access, int *ret_val)
 {
 	bool ret = true;
 
 	/* The pfn is invalid, report the error! */
-	if (unlikely(is_invalid_pfn(pfn))) {
+	if (unlikely(is_error_pfn(pfn))) {
 		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
 		goto exit;
 	}
@@ -2862,7 +2893,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
@@ -3331,7 +3362,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
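Both call sites above follow the same protocol: sample the notifier sequence count before the sleeping, lock-free gfn-to-pfn lookup, then, with mmu_lock held, ask mmu_notifier_retry() whether an invalidation ran in the window; if so, the pfn may be stale and the fault path bails out to refault. The signature change to take struct kvm reflects that the check needs nothing vcpu-specific. A toy model of that handshake, with invented toy_ names and the real code's memory barriers omitted:

#include <stdbool.h>

/* Toy model of the notifier-sequence handshake: invalidations bump a
 * sequence count; page-fault paths sample it before the pfn lookup and
 * retry if it moved. All names here are ours, not KVM's. */
struct toy_kvm {
	unsigned long mmu_notifier_seq;
	unsigned long mmu_notifier_count;	/* nonzero while invalidating */
};

static bool toy_mmu_notifier_retry(struct toy_kvm *kvm, unsigned long seq)
{
	if (kvm->mmu_notifier_count)		/* invalidation in progress */
		return true;
	if (kvm->mmu_notifier_seq != seq)	/* one completed meanwhile */
		return true;
	return false;
}

/* Caller pattern, mirroring nonpaging_map()/tdp_page_fault():
 *
 *	seq = kvm->mmu_notifier_seq;	(sampled before the pfn lookup)
 *	pfn = slow_gfn_to_pfn(...);	(may sleep, no lock held)
 *	lock(&kvm->mmu_lock);
 *	if (toy_mmu_notifier_retry(kvm, seq))
 *		goto out_unlock;	(pfn may be stale: refault)
 */
int main(void)
{
	struct toy_kvm kvm = { .mmu_notifier_seq = 1 };
	unsigned long seq = kvm.mmu_notifier_seq;

	kvm.mmu_notifier_seq++;		/* an invalidation slipped in */
	return toy_mmu_notifier_retry(&kvm, seq) ? 0 : 1;
}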
@@ -3399,14 +3430,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
 	nonpaging_free(vcpu);
 }
 
-static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
-{
-	int bit7;
-
-	bit7 = (gpte >> 7) & 1;
-	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
-}
-
 static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
 {
 	unsigned mask;