aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2010-10-14 05:22:46 -0400
committerAvi Kivity <avi@redhat.com>2011-01-12 04:21:39 -0500
commitaf585b921e5d1e919947c4b1164b59507fe7cd7b (patch)
treed0d4cc753d4d58934c5986733d7340fe69e523de /arch/x86/kvm/mmu.c
parent010c520e20413dfd567d568aba2b7238acd37e33 (diff)
KVM: Halt vcpu if page it tries to access is swapped out
If a guest accesses swapped out memory do not swap it in from vcpu thread context. Schedule work to do swapping and put vcpu into halted state instead. Interrupts will still be delivered to the guest and if interrupt will cause reschedule guest will continue to run another task. [avi: remove call to get_user_pages_noio(), nacked by Linus; this makes everything synchronous again] Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Gleb Natapov <gleb@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--arch/x86/kvm/mmu.c52
1 files changed, 51 insertions, 1 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fbb04aee8301..4ab04de5a76a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -18,9 +18,11 @@
18 * 18 *
19 */ 19 */
20 20
21#include "irq.h"
21#include "mmu.h" 22#include "mmu.h"
22#include "x86.h" 23#include "x86.h"
23#include "kvm_cache_regs.h" 24#include "kvm_cache_regs.h"
25#include "x86.h"
24 26
25#include <linux/kvm_host.h> 27#include <linux/kvm_host.h>
26#include <linux/types.h> 28#include <linux/types.h>
@@ -2587,6 +2589,50 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2587 error_code & PFERR_WRITE_MASK, gfn); 2589 error_code & PFERR_WRITE_MASK, gfn);
2588} 2590}
2589 2591
2592int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
2593{
2594 struct kvm_arch_async_pf arch;
2595 arch.gfn = gfn;
2596
2597 return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
2598}
2599
2600static bool can_do_async_pf(struct kvm_vcpu *vcpu)
2601{
2602 if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
2603 kvm_event_needs_reinjection(vcpu)))
2604 return false;
2605
2606 return kvm_x86_ops->interrupt_allowed(vcpu);
2607}
2608
2609static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
2610 pfn_t *pfn)
2611{
2612 bool async;
2613
2614 *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async);
2615
2616 if (!async)
2617 return false; /* *pfn has correct page already */
2618
2619 put_page(pfn_to_page(*pfn));
2620
2621 if (can_do_async_pf(vcpu)) {
2622 trace_kvm_try_async_get_page(async, *pfn);
2623 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
2624 trace_kvm_async_pf_doublefault(gva, gfn);
2625 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
2626 return true;
2627 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
2628 return true;
2629 }
2630
2631 *pfn = gfn_to_pfn(vcpu->kvm, gfn);
2632
2633 return false;
2634}
2635
2590static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, 2636static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2591 u32 error_code) 2637 u32 error_code)
2592{ 2638{
@@ -2609,7 +2655,11 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2609 2655
2610 mmu_seq = vcpu->kvm->mmu_notifier_seq; 2656 mmu_seq = vcpu->kvm->mmu_notifier_seq;
2611 smp_rmb(); 2657 smp_rmb();
2612 pfn = gfn_to_pfn(vcpu->kvm, gfn); 2658
2659 if (try_async_pf(vcpu, gfn, gpa, &pfn))
2660 return 0;
2661
2662 /* mmio */
2613 if (is_error_pfn(pfn)) 2663 if (is_error_pfn(pfn))
2614 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn); 2664 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2615 spin_lock(&vcpu->kvm->mmu_lock); 2665 spin_lock(&vcpu->kvm->mmu_lock);