| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 13:14:24 -0500 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 13:14:24 -0500 |
| commit    | 55065bc52795faae549abfb912aacc622dd63876 (patch) | |
| tree      | 63683547e41ed459a2a8747eeafb5e969633d54f /include | |
| parent    | 008d23e4852d78bb2618f2035f8b2110b6a6b968 (diff) | |
| parent    | e5c301428294cb8925667c9ee39f817c4ab1c2c9 (diff) | |
Merge branch 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (142 commits)
KVM: Initialize fpu state in preemptible context
KVM: VMX: when entering real mode align segment base to 16 bytes
KVM: MMU: handle 'map_writable' in set_spte() function
KVM: MMU: audit: allow audit more guests at the same time
KVM: Fetch guest cr3 from hardware on demand
KVM: Replace reads of vcpu->arch.cr3 by an accessor
KVM: MMU: only write protect mappings at pagetable level
KVM: VMX: Correct asm constraint in vmcs_load()/vmcs_clear()
KVM: MMU: Initialize base_role for tdp mmus
KVM: VMX: Optimize atomic EFER load
KVM: VMX: Add definitions for more vm entry/exit control bits
KVM: SVM: copy instruction bytes from VMCB
KVM: SVM: implement enhanced INVLPG intercept
KVM: SVM: enhance mov DR intercept handler
KVM: SVM: enhance MOV CR intercept handler
KVM: SVM: add new SVM feature bit names
KVM: cleanup emulate_instruction
KVM: move complete_insn_gp() into x86.c
KVM: x86: fix CR8 handling
KVM guest: Fix kvm clock initialization when it's configured out
...
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/kvm.h        |   1 |
| -rw-r--r-- | include/linux/kvm_host.h   | 101 |
| -rw-r--r-- | include/linux/kvm_types.h  |   7 |
| -rw-r--r-- | include/trace/events/kvm.h | 121 |

4 files changed, 215 insertions, 15 deletions
```diff
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 919ae53adc5c..ea2dc1a2e13d 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -540,6 +540,7 @@ struct kvm_ppc_pvinfo {
 #endif
 #define KVM_CAP_PPC_GET_PVINFO 57
 #define KVM_CAP_PPC_IRQ_LEVEL 58
+#define KVM_CAP_ASYNC_PF 59

 #ifdef KVM_CAP_IRQ_ROUTING

```
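The new KVM_CAP_ASYNC_PF capability is discoverable from userspace in the usual way, through the KVM_CHECK_EXTENSION ioctl on /dev/kvm. A minimal probe might look like the sketch below; it is not part of this merge and only assumes headers that already carry the new define:

```c
/* Hypothetical capability probe -- illustration only, not from this merge. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);	/* system-wide KVM handle */
	if (kvm < 0)
		return 1;
	/* KVM_CHECK_EXTENSION returns a positive value when the cap exists. */
	int ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ASYNC_PF);
	printf("KVM_CAP_ASYNC_PF: %s\n", ret > 0 ? "supported" : "not supported");
	return 0;
}
```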
```diff
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a0557422715e..b5021db21858 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -16,6 +16,8 @@
 #include <linux/mm.h>
 #include <linux/preempt.h>
 #include <linux/msi.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
 #include <asm/signal.h>

 #include <linux/kvm.h>
@@ -40,6 +42,7 @@
 #define KVM_REQ_KICK 9
 #define KVM_REQ_DEACTIVATE_FPU 10
 #define KVM_REQ_EVENT 11
+#define KVM_REQ_APF_HALT 12

 #define KVM_USERSPACE_IRQ_SOURCE_ID 0

@@ -74,6 +77,27 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                               struct kvm_io_device *dev);

+#ifdef CONFIG_KVM_ASYNC_PF
+struct kvm_async_pf {
+        struct work_struct work;
+        struct list_head link;
+        struct list_head queue;
+        struct kvm_vcpu *vcpu;
+        struct mm_struct *mm;
+        gva_t gva;
+        unsigned long addr;
+        struct kvm_arch_async_pf arch;
+        struct page *page;
+        bool done;
+};
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+                       struct kvm_arch_async_pf *arch);
+int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
+#endif
+
 struct kvm_vcpu {
         struct kvm *kvm;
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -104,6 +128,15 @@ struct kvm_vcpu {
         gpa_t mmio_phys_addr;
 #endif

+#ifdef CONFIG_KVM_ASYNC_PF
+        struct {
+                u32 queued;
+                struct list_head queue;
+                struct list_head done;
+                spinlock_t lock;
+        } async_pf;
+#endif
+
         struct kvm_vcpu_arch arch;
 };

@@ -113,16 +146,19 @@ struct kvm_vcpu {
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

+struct kvm_lpage_info {
+        unsigned long rmap_pde;
+        int write_count;
+};
+
 struct kvm_memory_slot {
         gfn_t base_gfn;
         unsigned long npages;
         unsigned long flags;
         unsigned long *rmap;
         unsigned long *dirty_bitmap;
-        struct {
-                unsigned long rmap_pde;
-                int write_count;
-        } *lpage_info[KVM_NR_PAGE_SIZES - 1];
+        unsigned long *dirty_bitmap_head;
+        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
         unsigned long userspace_addr;
         int user_alloc;
         int id;
@@ -169,6 +205,7 @@ struct kvm_irq_routing_table {};

 struct kvm_memslots {
         int nmemslots;
+        u64 generation;
         struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
                                         KVM_PRIVATE_MEM_SLOTS];
 };
@@ -206,6 +243,10 @@ struct kvm {

         struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
+        /*
+         * Update side is protected by irq_lock and,
+         * if configured, irqfds.lock.
+         */
         struct kvm_irq_routing_table __rcu *irq_routing;
         struct hlist_head mask_notifier_list;
         struct hlist_head irq_ack_notifier_list;
@@ -216,6 +257,7 @@ struct kvm {
         unsigned long mmu_notifier_seq;
         long mmu_notifier_count;
 #endif
+        long tlbs_dirty;
 };

 /* The guest did something we don't support. */
@@ -302,7 +344,11 @@ void kvm_set_page_accessed(struct page *page);

 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+                       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+                      bool *writable);
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
                          struct kvm_memory_slot *slot, gfn_t gfn);
 int memslot_id(struct kvm *kvm, gfn_t gfn);
@@ -321,18 +367,25 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                          int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                     unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+                           void *data, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+                              gpa_t gpa);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+                             gfn_t gfn);

 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);

@@ -398,7 +451,19 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

 void kvm_free_physmem(struct kvm *kvm);

-struct kvm *kvm_arch_create_vm(void);
+#ifndef __KVM_HAVE_ARCH_VM_ALLOC
+static inline struct kvm *kvm_arch_alloc_vm(void)
+{
+        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+}
+
+static inline void kvm_arch_free_vm(struct kvm *kvm)
+{
+        kfree(kvm);
+}
+#endif
+
+int kvm_arch_init_vm(struct kvm *kvm);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
@@ -414,16 +479,8 @@ struct kvm_irq_ack_notifier {
         void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
 };

-#define KVM_ASSIGNED_MSIX_PENDING 0x1
-struct kvm_guest_msix_entry {
-        u32 vector;
-        u16 entry;
-        u16 flags;
-};
-
 struct kvm_assigned_dev_kernel {
         struct kvm_irq_ack_notifier ack_notifier;
-        struct work_struct interrupt_work;
         struct list_head list;
         int assigned_dev_id;
         int host_segnr;
@@ -434,13 +491,14 @@ struct kvm_assigned_dev_kernel {
         bool host_irq_disabled;
         struct msix_entry *host_msix_entries;
         int guest_irq;
-        struct kvm_guest_msix_entry *guest_msix_entries;
+        struct msix_entry *guest_msix_entries;
         unsigned long irq_requested_type;
         int irq_source_id;
         int flags;
         struct pci_dev *dev;
         struct kvm *kvm;
-        spinlock_t assigned_dev_lock;
+        spinlock_t intx_lock;
+        char irq_name[32];
 };

 struct kvm_irq_mask_notifier {
@@ -462,6 +520,8 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
                                    unsigned long *deliver_bitmask);
 #endif
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
+                int irq_source_id, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                    struct kvm_irq_ack_notifier *kian);
@@ -603,17 +663,28 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 void kvm_eventfd_init(struct kvm *kvm);
 int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
 void kvm_irqfd_release(struct kvm *kvm);
+void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

 #else

 static inline void kvm_eventfd_init(struct kvm *kvm) {}
+
 static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
 {
         return -EINVAL;
 }

 static inline void kvm_irqfd_release(struct kvm *kvm) {}
+
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+static inline void kvm_irq_routing_update(struct kvm *kvm,
+                                          struct kvm_irq_routing_table *irq_rt)
+{
+        rcu_assign_pointer(kvm->irq_routing, irq_rt);
+}
+#endif
+
 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
         return -ENOSYS;
```
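Among the additions above, kvm_gfn_to_hva_cache_init() and kvm_write_guest_cached() let in-kernel users resolve a guest physical address once and keep writing through the cached translation until the memslot generation changes. The sketch below shows the intended calling pattern only; my_dev, my_dev_setup() and my_dev_publish() are made-up names, not code from this merge:

```c
/* Hypothetical in-kernel caller -- illustrates the cached-write pattern only. */
#include <linux/kvm_host.h>

struct my_dev {
	struct gfn_to_hva_cache ghc;	/* cached gpa -> hva translation */
	u64 payload;
};

static int my_dev_setup(struct kvm *kvm, struct my_dev *dev, gpa_t gpa)
{
	/* Resolve the translation once; it is revalidated against the
	 * memslots generation counter added in this merge. */
	return kvm_gfn_to_hva_cache_init(kvm, &dev->ghc, gpa);
}

static int my_dev_publish(struct kvm *kvm, struct my_dev *dev)
{
	/* Fast path: no per-write gfn lookup while the cache stays valid. */
	return kvm_write_guest_cached(kvm, &dev->ghc, &dev->payload,
				      sizeof(dev->payload));
}
```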
```diff
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 7ac0d4eee430..fa7cc7244cbd 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -67,4 +67,11 @@ struct kvm_lapic_irq {
         u32 dest_id;
 };

+struct gfn_to_hva_cache {
+        u64 generation;
+        gpa_t gpa;
+        unsigned long hva;
+        struct kvm_memory_slot *memslot;
+};
+
 #endif /* __KVM_TYPES_H__ */
```
```diff
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6dd3a51ab1cb..46e3cd8e197a 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -6,6 +6,36 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm

+#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
+
+#define kvm_trace_exit_reason                                           \
+        ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),      \
+        ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),      \
+        ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),    \
+        ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
+        ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
+
+TRACE_EVENT(kvm_userspace_exit,
+        TP_PROTO(__u32 reason, int errno),
+        TP_ARGS(reason, errno),
+
+        TP_STRUCT__entry(
+                __field( __u32, reason )
+                __field( int, errno )
+        ),
+
+        TP_fast_assign(
+                __entry->reason = reason;
+                __entry->errno = errno;
+        ),
+
+        TP_printk("reason %s (%d)",
+                  __entry->errno < 0 ?
+                  (__entry->errno == -EINTR ? "restart" : "error") :
+                  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
+                  __entry->errno < 0 ? -__entry->errno : __entry->reason)
+);
+
 #if defined(__KVM_HAVE_IOAPIC)
 TRACE_EVENT(kvm_set_irq,
         TP_PROTO(unsigned int gsi, int level, int irq_source_id),
@@ -185,6 +215,97 @@ TRACE_EVENT(kvm_age_page,
                 __entry->referenced ? "YOUNG" : "OLD")
 );

+#ifdef CONFIG_KVM_ASYNC_PF
+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
+
+        TP_PROTO(u64 gva, u64 gfn),
+
+        TP_ARGS(gva, gfn),
+
+        TP_STRUCT__entry(
+                __field(__u64, gva)
+                __field(u64, gfn)
+        ),
+
+        TP_fast_assign(
+                __entry->gva = gva;
+                __entry->gfn = gfn;
+        ),
+
+        TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
+
+        TP_PROTO(u64 gva, u64 gfn),
+
+        TP_ARGS(gva, gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+
+        TP_PROTO(u64 gva, u64 gfn),
+
+        TP_ARGS(gva, gfn)
+);
+
+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
+
+        TP_PROTO(u64 token, u64 gva),
+
+        TP_ARGS(token, gva),
+
+        TP_STRUCT__entry(
+                __field(__u64, token)
+                __field(__u64, gva)
+        ),
+
+        TP_fast_assign(
+                __entry->token = token;
+                __entry->gva = gva;
+        ),
+
+        TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
+
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
+
+        TP_PROTO(u64 token, u64 gva),
+
+        TP_ARGS(token, gva)
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
+
+        TP_PROTO(u64 token, u64 gva),
+
+        TP_ARGS(token, gva)
+);
+
+TRACE_EVENT(
+        kvm_async_pf_completed,
+        TP_PROTO(unsigned long address, struct page *page, u64 gva),
+        TP_ARGS(address, page, gva),
+
+        TP_STRUCT__entry(
+                __field(unsigned long, address)
+                __field(pfn_t, pfn)
+                __field(u64, gva)
+        ),
+
+        TP_fast_assign(
+                __entry->address = address;
+                __entry->pfn = page ? page_to_pfn(page) : 0;
+                __entry->gva = gva;
+        ),
+
+        TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
+                  __entry->address, __entry->pfn)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_MAIN_H */

 /* This part must be outside protection */
```
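Each TRACE_EVENT()/DEFINE_EVENT() above generates a trace_<name>() helper that KVM code can call at the matching point in the fault path. The snippet below is illustrative only (the actual call sites are not part of this header diff); once the kvm:kvm_async_pf_* events are enabled, the records appear through the normal tracing interface, e.g. /sys/kernel/debug/tracing/trace:

```c
/* Illustrative call sites for the generated tracepoint helpers. */
#include <trace/events/kvm.h>

static void example_async_pf_trace(u64 gva, u64 gfn, u64 token)
{
	/* Fault handed off to the async worker. */
	trace_kvm_try_async_get_page(gva, gfn);

	/* Guest told that the page is not present yet. */
	trace_kvm_async_pf_not_present(token, gva);

	/* Page brought in; the guest can be resumed. */
	trace_kvm_async_pf_ready(token, gva);
}
```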