aboutsummaryrefslogtreecommitdiffstats
path: root/include/trace/events
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2010-10-14 05:22:46 -0400
committerAvi Kivity <avi@redhat.com>2011-01-12 04:21:39 -0500
commitaf585b921e5d1e919947c4b1164b59507fe7cd7b (patch)
treed0d4cc753d4d58934c5986733d7340fe69e523de /include/trace/events
parent010c520e20413dfd567d568aba2b7238acd37e33 (diff)
KVM: Halt vcpu if page it tries to access is swapped out
If a guest accesses swapped-out memory, do not swap it in from the vcpu thread context. Schedule work to do the swapping and put the vcpu into a halted state instead. Interrupts will still be delivered to the guest, and if an interrupt causes a reschedule, the guest will continue to run another task. [avi: remove call to get_user_pages_noio(), nacked by Linus; this makes everything synchronous again] Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Gleb Natapov <gleb@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'include/trace/events')
-rw-r--r--include/trace/events/kvm.h90
1 files changed, 90 insertions, 0 deletions
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6dd3a51ab1cb..a78a5e574632 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -185,6 +185,96 @@ TRACE_EVENT(kvm_age_page,
185		__entry->referenced ? "YOUNG" : "OLD")
186 );
187
188#ifdef CONFIG_KVM_ASYNC_PF
/*
 * Tracepoint: kvm_try_async_get_page
 *
 * Fired when KVM resolves a guest page that may require swap-in.
 * Only the pfn is recorded: on a synchronous hit (!async) the real
 * pfn is stored; when the fault is handled asynchronously the pfn is
 * logged as (u64)-1, since the page is not yet available.
 */
TRACE_EVENT(
	kvm_try_async_get_page,
	TP_PROTO(bool async, u64 pfn),
	TP_ARGS(async, pfn),

	TP_STRUCT__entry(
		__field(__u64, pfn)
		),

	TP_fast_assign(
		/* (u64)-1 marks "pfn not known yet, async path taken" */
		__entry->pfn = (!async) ? pfn : (u64)-1;
		),

	TP_printk("pfn %#llx", __entry->pfn)
);
204
/*
 * Tracepoint: kvm_async_pf_not_present
 *
 * Fired when the faulting guest page is not present and the fault is
 * deferred to the async page-fault path (per the commit message, the
 * vcpu is halted while a worker swaps the page in). Records the guest
 * virtual address that faulted.
 */
TRACE_EVENT(
	kvm_async_pf_not_present,
	TP_PROTO(u64 gva),
	TP_ARGS(gva),

	TP_STRUCT__entry(
		__field(__u64, gva)
		),

	TP_fast_assign(
		__entry->gva = gva;
		),

	TP_printk("gva %#llx not present", __entry->gva)
);
220
/*
 * Tracepoint: kvm_async_pf_ready
 *
 * Fired when a previously deferred guest page has become available
 * again, i.e. the async page fault for this gva can be completed.
 * Pairs with kvm_async_pf_not_present for the same address.
 */
TRACE_EVENT(
	kvm_async_pf_ready,
	TP_PROTO(u64 gva),
	TP_ARGS(gva),

	TP_STRUCT__entry(
		__field(__u64, gva)
		),

	TP_fast_assign(
		__entry->gva = gva;
		),

	TP_printk("gva %#llx ready", __entry->gva)
);
236
/*
 * Tracepoint: kvm_async_pf_completed
 *
 * Fired when async page-fault work finishes for a host address.
 * Records the host address, the pfn of the faulted-in page (0 if no
 * page was obtained), and the guest virtual address.
 *
 * NOTE(review): pfn is declared pfn_t but printed with %#llx — this
 * assumes pfn_t is 64-bit here; confirm against the arch definition.
 */
TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, struct page *page, u64 gva),
	TP_ARGS(address, page, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(pfn_t, pfn)
		__field(u64, gva)
		),

	TP_fast_assign(
		__entry->address = address;
		/* page may be NULL; log pfn 0 in that case */
		__entry->pfn = page ? page_to_pfn(page) : 0;
		__entry->gva = gva;
		),

	TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
		  __entry->address, __entry->pfn)
);
257
/*
 * Tracepoint: kvm_async_pf_doublefault
 *
 * Fired when a fault is taken on a gva/gfn for which an async page
 * fault is apparently already outstanding (a "double" fault on the
 * async-pf path). Records both the guest virtual address and the
 * guest frame number.
 */
TRACE_EVENT(
	kvm_async_pf_doublefault,
	TP_PROTO(u64 gva, u64 gfn),
	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(u64, gva)
		__field(u64, gfn)
		),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
		),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);
275
276#endif
277
278#endif /* _TRACE_KVM_MAIN_H */
279
280/* This part must be outside protection */