about summary refs log tree commit diff stats
path: root/virt/kvm
diff options
context:
space:
mode:
authorDominik Dingel <dingel@linux.vnet.ibm.com>2013-06-06 09:32:37 -0400
committerChristian Borntraeger <borntraeger@de.ibm.com>2014-01-30 06:51:38 -0500
commite0ead41a6dac09f86675ce07a66e4b253a9b7bd5 (patch)
treeed0026f1f858cbab03db7dc83cb7820d0a348821 /virt/kvm
parent24eb3a824c4f3ccfaa2305dc1d9d9e2a708828c5 (diff)
KVM: async_pf: Provide additional direct page notification
By setting a Kconfig option, the architecture can control when guest notifications will be presented by the apf backend. There is the default batch mechanism, working as before, where the vcpu thread should pull in this information. In contrast to this, there is now a direct mechanism that will push the information to the guest. This way s390 can use an already existing architecture interface. Still, the vcpu thread should call check_completion to clean up leftovers. Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r--virt/kvm/Kconfig4
-rw-r--r--virt/kvm/async_pf.c20
2 files changed, 22 insertions, 2 deletions
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index fbe1a48bd629..13f2d19793e3 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -22,6 +22,10 @@ config KVM_MMIO
 config KVM_ASYNC_PF
 	bool
 
+# Toggle to switch between direct notification and batch job
+config KVM_ASYNC_PF_SYNC
+	bool
+
 config HAVE_KVM_MSI
 	bool
 
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8631d9c14320..00980ab02c45 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -28,6 +28,21 @@
 #include "async_pf.h"
 #include <trace/events/kvm.h>
 
+static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
+					       struct kvm_async_pf *work)
+{
+#ifdef CONFIG_KVM_ASYNC_PF_SYNC
+	kvm_arch_async_page_present(vcpu, work);
+#endif
+}
+static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
+					       struct kvm_async_pf *work)
+{
+#ifndef CONFIG_KVM_ASYNC_PF_SYNC
+	kvm_arch_async_page_present(vcpu, work);
+#endif
+}
+
 static struct kmem_cache *async_pf_cache;
 
 int kvm_async_pf_init(void)
@@ -69,6 +84,7 @@ static void async_pf_execute(struct work_struct *work)
 	down_read(&mm->mmap_sem);
 	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
+	kvm_async_page_present_sync(vcpu, apf);
 	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
@@ -138,7 +154,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 		       struct kvm_arch_async_pf *arch)
 {
 	struct kvm_async_pf *work;
@@ -159,7 +175,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	work->wakeup_all = false;
 	work->vcpu = vcpu;
 	work->gva = gva;
-	work->addr = gfn_to_hva(vcpu->kvm, gfn);
+	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
 	atomic_inc(&work->mm->mm_count);