 arch/x86/include/asm/kvm_host.h |  3 ++-
 arch/x86/kvm/mmu.c              | 18 +++++++++---------
 arch/x86/kvm/paging_tmpl.h      |  4 ++--
 3 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d968cc501799..aa1518d794cc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -241,7 +241,8 @@ struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, bool no_apf);
+	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+			  bool prefault);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
 				  struct x86_exception *fault);
 	void (*free)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 482a5c0c48db..83d45cf0a61c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2284,11 +2284,11 @@ static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
 	return 1;
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
-			 bool no_apf)
+			 bool prefault)
 {
 	int r;
 	int level;
@@ -2310,7 +2310,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, no_apf, gfn, v, &pfn, write, &map_writable))
+	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
 		return 0;
 
 	/* mmio */
@@ -2584,7 +2584,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-				u32 error_code, bool no_apf)
+				u32 error_code, bool prefault)
 {
 	gfn_t gfn;
 	int r;
@@ -2600,7 +2600,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	gfn = gva >> PAGE_SHIFT;
 
 	return nonpaging_map(vcpu, gva & PAGE_MASK,
-			     error_code & PFERR_WRITE_MASK, gfn, no_apf);
+			     error_code & PFERR_WRITE_MASK, gfn, prefault);
 }
 
 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
@@ -2622,7 +2622,7 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
 {
 	bool async;
@@ -2634,7 +2634,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
 
 	put_page(pfn_to_page(*pfn));
 
-	if (!no_apf && can_do_async_pf(vcpu)) {
+	if (!prefault && can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
@@ -2650,7 +2650,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
 }
 
 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
-			  bool no_apf)
+			  bool prefault)
 {
 	pfn_t pfn;
 	int r;
@@ -2674,7 +2674,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn, write, &map_writable))
+	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
 		return 0;
 
 	/* mmio */
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d5a0a11d33a1..52b3e91918c6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -539,7 +539,7 @@ out_gpte_changed:
  * a negative value on error.
  */
 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
-			     bool no_apf)
+			     bool prefault)
 {
 	int write_fault = error_code & PFERR_WRITE_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
@@ -581,7 +581,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, no_apf, walker.gfn, addr, &pfn, write_fault,
+	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
 			 &map_writable))
 		return 0;
 
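
The renamed flag reads naturally at its use site above: try_async_pf() queues an asynchronous page fault only when prefault is false, i.e. when the guest is genuinely blocked on the fault rather than re-running it after an asynchronous page-in. A minimal sketch of how a completion path would invoke the renamed callback follows; it is not part of this patch, and the function name and the zero error code are assumptions for illustration only.

#include <linux/kvm_host.h>

/* Hypothetical caller: re-run a fault once the page has been brought in. */
static void example_async_page_ready(struct kvm_vcpu *vcpu, gva_t gva)
{
	/*
	 * Passing prefault == true marks this as a prefault retry, so
	 * try_async_pf() will not queue another async fault for the gfn.
	 */
	vcpu->arch.mmu.page_fault(vcpu, gva, 0, true);
}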