Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/Kconfig     |  4
-rw-r--r--  virt/kvm/async_pf.c  | 20
2 files changed, 22 insertions, 2 deletions
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index fbe1a48bd629..13f2d19793e3 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -22,6 +22,10 @@ config KVM_MMIO
 config KVM_ASYNC_PF
 	bool
 
+# Toggle to switch between direct notification and batch job
+config KVM_ASYNC_PF_SYNC
+	bool
+
 config HAVE_KVM_MSI
 	bool
 
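Since the new symbol is a promptless bool, it can only be enabled when something else selects it. A hedged sketch of how an architecture's KVM Kconfig entry might opt in (purely illustrative; no such selection is part of this diff):

config KVM
	tristate "Kernel-based Virtual Machine (KVM) support"
	select KVM_ASYNC_PF
	# request direct ("synchronous") notification rather than the batch job
	select KVM_ASYNC_PF_SYNC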
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8631d9c14320..00980ab02c45 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -28,6 +28,21 @@
 #include "async_pf.h"
 #include <trace/events/kvm.h>
 
+static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
+					       struct kvm_async_pf *work)
+{
+#ifdef CONFIG_KVM_ASYNC_PF_SYNC
+	kvm_arch_async_page_present(vcpu, work);
+#endif
+}
+static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
+						struct kvm_async_pf *work)
+{
+#ifndef CONFIG_KVM_ASYNC_PF_SYNC
+	kvm_arch_async_page_present(vcpu, work);
+#endif
+}
+
 static struct kmem_cache *async_pf_cache;
 
 int kvm_async_pf_init(void)
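The helper pair above is a compile-time switch: depending on CONFIG_KVM_ASYNC_PF_SYNC, exactly one of the two forwards to kvm_arch_async_page_present() while the other compiles to an empty function. A minimal stand-alone sketch of the same gating pattern (types and names below are simplified stand-ins, not taken from the patch); build it with and without -DCONFIG_KVM_ASYNC_PF_SYNC to see which path runs:

#include <stdio.h>

struct vcpu { int id; };
struct async_pf { unsigned long addr; };

/* stand-in for kvm_arch_async_page_present() */
static void arch_page_present(struct vcpu *v, struct async_pf *work)
{
	printf("vcpu %d: page at %#lx is present\n", v->id, work->addr);
}

static inline void page_present_sync(struct vcpu *v, struct async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
	arch_page_present(v, work);	/* notify directly from the worker */
#endif
}

static inline void page_present_async(struct vcpu *v, struct async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
	arch_page_present(v, work);	/* notify later, from the batch path */
#endif
}

int main(void)
{
	struct vcpu v = { .id = 0 };
	struct async_pf work = { .addr = 0x1000 };

	/* Exactly one of these prints, mirroring the #ifdef/#ifndef pair above. */
	page_present_sync(&v, &work);
	page_present_async(&v, &work);
	return 0;
}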
@@ -69,6 +84,7 @@ static void async_pf_execute(struct work_struct *work)
 	down_read(&mm->mmap_sem);
 	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
+	kvm_async_page_present_sync(vcpu, apf);
 	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
@@ -138,7 +154,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 		       struct kvm_arch_async_pf *arch)
 {
 	struct kvm_async_pf *work;
@@ -159,7 +175,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 	work->wakeup_all = false;
 	work->vcpu = vcpu;
 	work->gva = gva;
-	work->addr = gfn_to_hva(vcpu->kvm, gfn);
+	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
 	atomic_inc(&work->mm->mm_count);
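The second change moves the gfn-to-hva translation out of kvm_setup_async_pf() and into its callers, which now pass a host virtual address directly. A minimal sketch, assuming kernel context (<linux/kvm_host.h>), of how a call site might adapt; the wrapper name here is hypothetical and not part of this diff:

#include <linux/kvm_host.h>

/* Hypothetical caller: translate the gfn up front, then hand the hva over. */
static int example_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
				  struct kvm_arch_async_pf *arch)
{
	/* gfn_to_hva() used to be called inside kvm_setup_async_pf() itself. */
	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);

	return kvm_setup_async_pf(vcpu, gva, hva, arch);
}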