author    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>    2010-12-06 21:48:06 -0500
committer Avi Kivity <avi@redhat.com>                      2011-01-12 04:30:38 -0500
commit    78b2c54aa4a7e9e4257d2b8e3a4b96d2d0c6e636
tree      fc8148e162a63ae8478dcd144f42428ad7bae3f0 /arch/x86/kvm
parent    b53ba3f9cc0b5ac21a86a95c702768f871b02610
KVM: MMU: rename 'no_apf' to 'prefault'
A fault raised with 'no_apf = 1' is on the speculative path, which a
later patch will handle specially, so 'prefault' is a better fit for
the meaning.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
 arch/x86/kvm/mmu.c         | 18 +++++++++---------
 arch/x86/kvm/paging_tmpl.h |  4 ++--
 2 files changed, 11 insertions(+), 11 deletions(-)
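The rename's semantic core is the gate in try_async_pf() below: only a real guest fault (prefault == false) may be turned into an asynchronous page fault, while a speculative prefault must be resolved synchronously. As a minimal user-space sketch of that gating (illustrative only, not kernel code; fault_in() and can_do_async() are hypothetical stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for can_do_async_pf(): assume async page faults are permitted. */
static bool can_do_async(void)
{
	return true;
}

/*
 * Mirrors the gate in try_async_pf(): a real guest fault
 * (prefault == false) may go asynchronous, while a speculative
 * prefault (prefault == true) must complete synchronously.
 */
static void fault_in(unsigned long gfn, bool prefault)
{
	if (!prefault && can_do_async())
		printf("gfn %lu: queued as an async page fault\n", gfn);
	else
		printf("gfn %lu: resolved synchronously\n", gfn);
}

int main(void)
{
	fault_in(42, false);	/* real guest fault: old no_apf = 0 */
	fault_in(42, true);	/* speculative prefault: old no_apf = 1 */
	return 0;
}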
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 482a5c0c48db..83d45cf0a61c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2284,11 +2284,11 @@ static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
 	return 1;
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
-			 bool no_apf)
+			 bool prefault)
 {
 	int r;
 	int level;
@@ -2310,7 +2310,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, no_apf, gfn, v, &pfn, write, &map_writable))
+	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
 		return 0;
 
 	/* mmio */
@@ -2584,7 +2584,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-				u32 error_code, bool no_apf)
+				u32 error_code, bool prefault)
 {
 	gfn_t gfn;
 	int r;
@@ -2600,7 +2600,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	gfn = gva >> PAGE_SHIFT;
 
 	return nonpaging_map(vcpu, gva & PAGE_MASK,
-			     error_code & PFERR_WRITE_MASK, gfn, no_apf);
+			     error_code & PFERR_WRITE_MASK, gfn, prefault);
 }
 
 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
@@ -2622,7 +2622,7 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
 {
 	bool async;
@@ -2634,7 +2634,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
 
 	put_page(pfn_to_page(*pfn));
 
-	if (!no_apf && can_do_async_pf(vcpu)) {
+	if (!prefault && can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
@@ -2650,7 +2650,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
 }
 
 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
-			  bool no_apf)
+			  bool prefault)
 {
 	pfn_t pfn;
 	int r;
@@ -2674,7 +2674,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn, write, &map_writable))
+	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
 		return 0;
 
 	/* mmio */
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d5a0a11d33a1..52b3e91918c6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -539,7 +539,7 @@ out_gpte_changed:
  * a negative value on error.
  */
 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
-			     bool no_apf)
+			     bool prefault)
 {
 	int write_fault = error_code & PFERR_WRITE_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
@@ -581,7 +581,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, no_apf, walker.gfn, addr, &pfn, write_fault,
+	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
 			 &map_writable))
 		return 0;
 
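As of this series, the flag's callers mirror the two paths: kvm_mmu_page_fault() passes false for a real guest #PF, while the fault retry performed before vmentry (added by the earlier "KVM: Retry fault before vmentry" change) passes true, since no guest is blocked waiting on that speculative completion.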