author    Gleb Natapov <gleb@redhat.com>    2010-10-14 05:22:46 -0400
committer Avi Kivity <avi@redhat.com>    2011-01-12 04:21:39 -0500
commit    af585b921e5d1e919947c4b1164b59507fe7cd7b
tree      d0d4cc753d4d58934c5986733d7340fe69e523de /include
parent    010c520e20413dfd567d568aba2b7238acd37e33
KVM: Halt vcpu if page it tries to access is swapped out
If a guest accesses swapped-out memory, do not swap it in from the vcpu
thread context. Schedule work to do the swapping and put the vcpu into a
halted state instead. Interrupts will still be delivered to the guest,
and if an interrupt causes a reschedule, the guest will continue to run
another task.

[avi: remove call to get_user_pages_noio(), nacked by Linus; this makes
 everything synchronous again]

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
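For orientation, a minimal sketch of the flow the message describes. Only kvm_setup_async_pf(), kvm_make_request() and KVM_REQ_APF_HALT are real KVM symbols (the first and last are introduced by this patch); handle_guest_fault(), page_is_resident() and handle_fault_sync() are hypothetical stand-ins for the arch fault path, which this diff does not touch:

/*
 * Sketch only, not the patch's actual code: the helpers marked
 * hypothetical below do not exist in KVM.
 */
static int handle_guest_fault(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
			      struct kvm_arch_async_pf *arch)
{
	if (!page_is_resident(gfn)) {		/* hypothetical check */
		/* Queue the swap-in to a work item instead of sleeping
		 * in the vcpu thread (new API from this patch). */
		if (kvm_setup_async_pf(vcpu, gva, gfn, arch)) {
			/* Halt the vcpu; interrupts are still delivered,
			 * so the guest can reschedule another task. */
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			return 0;
		}
	}
	return handle_fault_sync(vcpu, gva, gfn);	/* hypothetical */
}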
Diffstat (limited to 'include')
-rw-r--r--  include/linux/kvm_host.h    | 31
-rw-r--r--  include/trace/events/kvm.h  | 90
2 files changed, 121 insertions, 0 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a0557422715e..e56acc7857e2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -40,6 +40,7 @@
 #define KVM_REQ_KICK 9
 #define KVM_REQ_DEACTIVATE_FPU 10
 #define KVM_REQ_EVENT 11
+#define KVM_REQ_APF_HALT 12
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
 
@@ -74,6 +75,26 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			       struct kvm_io_device *dev);
 
+#ifdef CONFIG_KVM_ASYNC_PF
+struct kvm_async_pf {
+	struct work_struct work;
+	struct list_head link;
+	struct list_head queue;
+	struct kvm_vcpu *vcpu;
+	struct mm_struct *mm;
+	gva_t gva;
+	unsigned long addr;
+	struct kvm_arch_async_pf arch;
+	struct page *page;
+	bool done;
+};
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+		       struct kvm_arch_async_pf *arch);
+#endif
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -104,6 +125,15 @@ struct kvm_vcpu {
 	gpa_t mmio_phys_addr;
 #endif
 
+#ifdef CONFIG_KVM_ASYNC_PF
+	struct {
+		u32 queued;
+		struct list_head queue;
+		struct list_head done;
+		spinlock_t lock;
+	} async_pf;
+#endif
+
 	struct kvm_vcpu_arch arch;
 };
 
@@ -302,6 +332,7 @@ void kvm_set_page_accessed(struct page *page);
 
 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn);
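A hedged sketch of how the two new declarations above might compose in a caller; the wrapper try_async_pf() and its control flow are assumptions (the real fault-path caller lives in arch code outside this diff):

/* Assumed usage, not from this patch. */
static bool try_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
			 struct kvm_arch_async_pf *arch, pfn_t *pfn)
{
	bool async = false;

	/* With a non-NULL async pointer the lookup is expected not to
	 * sleep; *async is set when the page is not immediately there. */
	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async);
	if (!async)
		return false;	/* *pfn is usable, fault handled synchronously */

	/* Page is swapped out: queue the background swap-in; the caller
	 * then halts the vcpu until kvm_check_async_pf_completion()
	 * observes the finished work item. */
	return kvm_setup_async_pf(vcpu, gva, gfn, arch) != 0;
}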
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6dd3a51ab1cb..a78a5e574632 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -185,6 +185,96 @@ TRACE_EVENT(kvm_age_page,
 		  __entry->referenced ? "YOUNG" : "OLD")
 );
 
+#ifdef CONFIG_KVM_ASYNC_PF
+TRACE_EVENT(
+	kvm_try_async_get_page,
+	TP_PROTO(bool async, u64 pfn),
+	TP_ARGS(async, pfn),
+
+	TP_STRUCT__entry(
+		__field(__u64, pfn)
+		),
+
+	TP_fast_assign(
+		__entry->pfn = (!async) ? pfn : (u64)-1;
+		),
+
+	TP_printk("pfn %#llx", __entry->pfn)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_not_present,
+	TP_PROTO(u64 gva),
+	TP_ARGS(gva),
+
+	TP_STRUCT__entry(
+		__field(__u64, gva)
+		),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		),
+
+	TP_printk("gva %#llx not present", __entry->gva)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_ready,
+	TP_PROTO(u64 gva),
+	TP_ARGS(gva),
+
+	TP_STRUCT__entry(
+		__field(__u64, gva)
+		),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		),
+
+	TP_printk("gva %#llx ready", __entry->gva)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_completed,
+	TP_PROTO(unsigned long address, struct page *page, u64 gva),
+	TP_ARGS(address, page, gva),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, address)
+		__field(pfn_t, pfn)
+		__field(u64, gva)
+		),
+
+	TP_fast_assign(
+		__entry->address = address;
+		__entry->pfn = page ? page_to_pfn(page) : 0;
+		__entry->gva = gva;
+		),
+
+	TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
+		  __entry->address, __entry->pfn)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_doublefault,
+	TP_PROTO(u64 gva, u64 gfn),
+	TP_ARGS(gva, gfn),
+
+	TP_STRUCT__entry(
+		__field(u64, gva)
+		__field(u64, gfn)
+		),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		__entry->gfn = gfn;
+		),
+
+	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_MAIN_H */
 
 /* This part must be outside protection */
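Each TRACE_EVENT(name, ...) above expands into a trace_name() inline that callers fire; a sketch of the generated helpers' signatures follows (the call sites are assumptions, since the actual callers live in virt/kvm and arch code outside this diff):

/* Sketch only: demonstrates the generated helpers, not real call sites. */
static void async_pf_trace_example(u64 gva, u64 gfn, unsigned long addr,
				   struct page *page, bool async, u64 pfn)
{
	trace_kvm_try_async_get_page(async, pfn);	/* pfn lookup result */
	if (async)
		trace_kvm_async_pf_not_present(gva);	/* vcpu will halt */
	else
		trace_kvm_async_pf_ready(gva);		/* page usable again */
	trace_kvm_async_pf_completed(addr, page, gva);	/* work item finished */
	trace_kvm_async_pf_doublefault(gva, gfn);	/* fault while pending */
}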