diff options
author | Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> | 2012-10-16 08:09:36 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2012-10-17 10:39:18 -0400 |
commit | a052b42b0e618f34ca891f00b4e8b8ac0e4b80c0 (patch) | |
tree | 7cfe89fc71bc93b056bacd78af40333ba8812ef5 /arch/x86/kvm/mmu.c | |
parent | d4878f24e32f5ea5330e6a48977c8997396bc014 (diff) |
KVM: MMU: move prefetch_invalid_gpte out of paging_tmpl.h
The function does not depend on guest mmu mode, move it out from
paging_tmpl.h
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 36 |
1 file changed, 28 insertions, 8 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7fe1e038be21..3d5ca7939380 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2506,6 +2506,14 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) | |||
2506 | mmu_free_roots(vcpu); | 2506 | mmu_free_roots(vcpu); |
2507 | } | 2507 | } |
2508 | 2508 | ||
2509 | static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) | ||
2510 | { | ||
2511 | int bit7; | ||
2512 | |||
2513 | bit7 = (gpte >> 7) & 1; | ||
2514 | return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0; | ||
2515 | } | ||
2516 | |||
2509 | static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, | 2517 | static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, |
2510 | bool no_dirty_log) | 2518 | bool no_dirty_log) |
2511 | { | 2519 | { |
@@ -2518,6 +2526,26 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, | |||
2518 | return gfn_to_pfn_memslot_atomic(slot, gfn); | 2526 | return gfn_to_pfn_memslot_atomic(slot, gfn); |
2519 | } | 2527 | } |
2520 | 2528 | ||
2529 | static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu, | ||
2530 | struct kvm_mmu_page *sp, u64 *spte, | ||
2531 | u64 gpte) | ||
2532 | { | ||
2533 | if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) | ||
2534 | goto no_present; | ||
2535 | |||
2536 | if (!is_present_gpte(gpte)) | ||
2537 | goto no_present; | ||
2538 | |||
2539 | if (!(gpte & PT_ACCESSED_MASK)) | ||
2540 | goto no_present; | ||
2541 | |||
2542 | return false; | ||
2543 | |||
2544 | no_present: | ||
2545 | drop_spte(vcpu->kvm, spte); | ||
2546 | return true; | ||
2547 | } | ||
2548 | |||
2521 | static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, | 2549 | static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, |
2522 | struct kvm_mmu_page *sp, | 2550 | struct kvm_mmu_page *sp, |
2523 | u64 *start, u64 *end) | 2551 | u64 *start, u64 *end) |
@@ -3395,14 +3423,6 @@ static void paging_free(struct kvm_vcpu *vcpu) | |||
3395 | nonpaging_free(vcpu); | 3423 | nonpaging_free(vcpu); |
3396 | } | 3424 | } |
3397 | 3425 | ||
3398 | static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) | ||
3399 | { | ||
3400 | int bit7; | ||
3401 | |||
3402 | bit7 = (gpte >> 7) & 1; | ||
3403 | return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0; | ||
3404 | } | ||
3405 | |||
3406 | static inline void protect_clean_gpte(unsigned *access, unsigned gpte) | 3426 | static inline void protect_clean_gpte(unsigned *access, unsigned gpte) |
3407 | { | 3427 | { |
3408 | unsigned mask; | 3428 | unsigned mask; |