author		Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>	2012-10-16 08:09:36 -0400
committer	Avi Kivity <avi@redhat.com>	2012-10-17 10:39:18 -0400
commit		a052b42b0e618f34ca891f00b4e8b8ac0e4b80c0
tree		7cfe89fc71bc93b056bacd78af40333ba8812ef5
parent		d4878f24e32f5ea5330e6a48977c8997396bc014
KVM: MMU: move prefetch_invalid_gpte out of paging_tmpl.h
The function does not depend on the guest MMU mode, so move it out of
paging_tmpl.h and into mmu.c.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
 arch/x86/kvm/mmu.c         | 36 ++++++++++++++++++++++++++++++++--------
 arch/x86/kvm/paging_tmpl.h | 26 +++-----------------------
 2 files changed, 31 insertions(+), 31 deletions(-)
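Before the diff, a minimal standalone sketch of the reserved-bit check that this patch hoists into mmu.c, for readers who want to run the logic outside the kernel. The toy_ names and the mask values are invented for illustration; in KVM, mmu->rsvd_bits_mask is filled in from the vCPU's active paging mode, and bit 7 of the gpte (the PS bit in directory entries) selects which mask row applies because large pages reserve different bits.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the relevant part of struct kvm_mmu: the table of
 * reserved-bit masks. The first index is bit 7 of the guest PTE, the
 * second is level - 1. The mask values used below are made up for the
 * demo; KVM computes the real ones from the vCPU's paging mode. */
struct toy_mmu {
	uint64_t rsvd_bits_mask[2][4];
};

static int is_rsvd_bits_set(struct toy_mmu *mmu, uint64_t gpte, int level)
{
	int bit7 = (gpte >> 7) & 1;

	return (gpte & mmu->rsvd_bits_mask[bit7][level - 1]) != 0;
}

int main(void)
{
	struct toy_mmu mmu = { .rsvd_bits_mask = {
		/* pretend bits 62:52 are reserved in a level-1 entry */
		[0][0] = 0x7ff0000000000000ULL,
		[1][0] = 0x7ff0000000000000ULL,
	} };

	printf("%d\n", is_rsvd_bits_set(&mmu, 0x0000000000001003ULL, 1)); /* 0 */
	printf("%d\n", is_rsvd_bits_set(&mmu, 0x0010000000001003ULL, 1)); /* 1 */
	return 0;
}

Compiled with a plain cc, this prints 0 then 1: the second gpte sets a bit inside the pretend-reserved range.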
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7fe1e038be2..3d5ca793938 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2506,6 +2506,14 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 	mmu_free_roots(vcpu);
 }
 
+static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+	int bit7;
+
+	bit7 = (gpte >> 7) & 1;
+	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
+}
+
 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 				     bool no_dirty_log)
 {
@@ -2518,6 +2526,26 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return gfn_to_pfn_memslot_atomic(slot, gfn);
 }
 
+static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu_page *sp, u64 *spte,
+				  u64 gpte)
+{
+	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+		goto no_present;
+
+	if (!is_present_gpte(gpte))
+		goto no_present;
+
+	if (!(gpte & PT_ACCESSED_MASK))
+		goto no_present;
+
+	return false;
+
+no_present:
+	drop_spte(vcpu->kvm, spte);
+	return true;
+}
+
 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 				    struct kvm_mmu_page *sp,
 				    u64 *start, u64 *end)
@@ -3395,14 +3423,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
 	nonpaging_free(vcpu);
 }
 
-static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
-{
-	int bit7;
-
-	bit7 = (gpte >> 7) & 1;
-	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
-}
-
 static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
 {
 	unsigned mask;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c5555329c73..36a80edf800 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -305,26 +305,6 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker,
 					addr, access);
 }
 
-static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
-					 struct kvm_mmu_page *sp, u64 *spte,
-					 pt_element_t gpte)
-{
-	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
-		goto no_present;
-
-	if (!is_present_gpte(gpte))
-		goto no_present;
-
-	if (!(gpte & PT_ACCESSED_MASK))
-		goto no_present;
-
-	return false;
-
-no_present:
-	drop_spte(vcpu->kvm, spte);
-	return true;
-}
-
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			      u64 *spte, const void *pte)
 {
@@ -333,7 +313,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pfn_t pfn;
 
 	gpte = *(const pt_element_t *)pte;
-	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
+	if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
 		return;
 
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
@@ -408,7 +388,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 
 		gpte = gptep[i];
 
-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
+		if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
 			continue;
 
 		pte_access = sp->role.access & gpte_access(vcpu, gpte);
@@ -751,7 +731,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 					  sizeof(pt_element_t)))
 			return -EINVAL;
 
-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
+		if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
 			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
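As a closing illustration, the control flow of the moved helper can be modelled in isolation. Everything prefixed toy_ below is hypothetical scaffolding, not KVM API; only the present (bit 0) and accessed (bit 5) positions are real architectural PTE bits. The helper treats a gpte as not worth prefetching if it sets reserved bits, is not present, or has never been accessed; our reading of the last check is that prefetching such an entry would amount to setting the accessed bit on the guest's behalf, which the prefetch path avoids. Note also why the move is safe: the body needs nothing mode-specific from the gpte, so widening the parameter from pt_element_t to u64 lets one copy in mmu.c serve both template instantiations.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PRESENT_MASK  (1ULL << 0)	/* architectural PTE present bit */
#define TOY_ACCESSED_MASK (1ULL << 5)	/* architectural PTE accessed bit */

/* Stand-in for drop_spte(): just record that the shadow PTE was zapped. */
static void toy_drop_spte(uint64_t *spte)
{
	*spte = 0;
}

/* Mirrors the control flow of prefetch_invalid_gpte(): returns true and
 * drops the shadow PTE when the guest PTE is unusable for prefetch. */
static bool toy_prefetch_invalid_gpte(uint64_t rsvd_mask, uint64_t *spte,
				      uint64_t gpte)
{
	if (gpte & rsvd_mask)
		goto no_present;

	if (!(gpte & TOY_PRESENT_MASK))
		goto no_present;

	if (!(gpte & TOY_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	toy_drop_spte(spte);
	return true;
}

int main(void)
{
	uint64_t rsvd_mask = 0x7ff0000000000000ULL; /* made-up reserved bits */
	uint64_t spte = 0xdeadULL;

	/* Present + accessed, no reserved bits set: fine to prefetch. */
	printf("%d\n", toy_prefetch_invalid_gpte(rsvd_mask, &spte, 0x1000 | 0x21));
	/* Present bit clear: invalid, and the shadow PTE gets dropped. */
	printf("%d\n", toy_prefetch_invalid_gpte(rsvd_mask, &spte, 0x1000 | 0x20));
	return 0;
}

Run standalone, this prints 0 (valid to prefetch) and then 1 (rejected because the present bit is clear, after which spte reads back as 0).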