author     Avi Kivity <avi@redhat.com>  2012-07-26 04:54:21 -0400
committer  Avi Kivity <avi@redhat.com>  2012-07-26 04:54:21 -0400
commit     e9bda6f6f902e6b55d9baceb5523468a048cbe56 (patch)
tree       bf09cc165da1197cd34967da0593d08b9a37c0f3 /include
parent     bdc0077af574800d24318b6945cf2344e8dbb050 (diff)
parent     06e48c510aa37f6e791602e6420422ea7071fe94 (diff)
Merge branch 'queue' into next
Merge patches queued during the run-up to the merge window.
* queue: (25 commits)
KVM: Choose better candidate for directed yield
KVM: Note down when cpu relax intercepted or pause loop exited
KVM: Add config to support ple or cpu relax optimzation
KVM: switch to symbolic name for irq_states size
KVM: x86: Fix typos in pmu.c
KVM: x86: Fix typos in lapic.c
KVM: x86: Fix typos in cpuid.c
KVM: x86: Fix typos in emulate.c
KVM: x86: Fix typos in x86.c
KVM: SVM: Fix typos
KVM: VMX: Fix typos
KVM: remove the unused parameter of gfn_to_pfn_memslot
KVM: remove is_error_hpa
KVM: make bad_pfn static to kvm_main.c
KVM: using get_fault_pfn to get the fault pfn
KVM: MMU: track the refcount when unmap the page
KVM: x86: remove unnecessary mark_page_dirty
KVM: MMU: Avoid handling same rmap_pde in kvm_handle_hva_range()
KVM: MMU: Push trace_kvm_age_page() into kvm_age_rmapp()
KVM: MMU: Add memslot parameter to hva handlers
...
Signed-off-by: Avi Kivity <avi@redhat.com>
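The first few commits above are the core of the queue: on a pause-loop exit (or an intercepted cpu_relax()), kvm_vcpu_on_spin() scans the VM's vcpus for a plausible lock holder and does a directed yield to it. Below is a condensed sketch of that flow built on the helpers this merge adds; the real function in virt/kvm/kvm_main.c also does a two-pass scan starting from a per-VM last_boosted_vcpu index, which is omitted here.

#include <linux/kvm_host.h>

/*
 * Condensed sketch of kvm_vcpu_on_spin() as extended by this queue;
 * the two-pass scan and last_boosted_vcpu bookkeeping of the real
 * function in virt/kvm/kvm_main.c are omitted.
 */
void vcpu_on_spin_sketch(struct kvm_vcpu *me)
{
        struct kvm *kvm = me->kvm;
        struct kvm_vcpu *vcpu;
        int i;

        kvm_vcpu_set_in_spin_loop(me, true);
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu == me)
                        continue;
                if (waitqueue_active(&vcpu->wq))
                        continue;       /* halted, cannot hold the lock */
                if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
                        continue;       /* itself spinning; poor candidate */
                if (kvm_vcpu_yield_to(vcpu))
                        break;
        }
        kvm_vcpu_set_in_spin_loop(me, false);
        /* make this vcpu a poor candidate for the next round */
        kvm_vcpu_set_dy_eligible(me, false);
}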
Diffstat (limited to 'include')
-rw-r--r--  include/linux/kvm_host.h | 62 ++++++++++++-----
1 file changed, 50 insertions(+), 12 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b70b48b01098..1993eb1cb2cd 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -183,6 +183,18 @@ struct kvm_vcpu {
        } async_pf;
 #endif
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+       /*
+        * Cpu relax intercept or pause loop exit optimization
+        * in_spin_loop: set when a vcpu does a pause loop exit
+        *  or cpu relax intercepted.
+        * dy_eligible: indicates whether vcpu is eligible for directed yield.
+        */
+       struct {
+               bool in_spin_loop;
+               bool dy_eligible;
+       } spin_loop;
+#endif
        struct kvm_vcpu_arch arch;
 };
 
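The two flags feed the yield-candidate filter. The check built on them (added to virt/kvm/kvm_main.c, inside the config-on branch, by the "Choose better candidate for directed yield" patch) is roughly:

#include <linux/kvm_host.h>

/*
 * Roughly the helper this queue adds to virt/kvm/kvm_main.c: a vcpu
 * that is itself in a spin loop is skipped unless a previous probe
 * flipped dy_eligible back on, so the same target is not starved
 * forever.
 */
bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
        bool eligible;

        eligible = !vcpu->spin_loop.in_spin_loop ||
                   (vcpu->spin_loop.in_spin_loop &&
                    vcpu->spin_loop.dy_eligible);

        if (vcpu->spin_loop.in_spin_loop)
                kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

        return eligible;
}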
@@ -378,20 +390,11 @@ id_to_memslot(struct kvm_memslots *slots, int id)
        return slot;
 }
 
-#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
-#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
-static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
-
 extern struct page *bad_page;
-extern struct page *fault_page;
-
-extern pfn_t bad_pfn;
-extern pfn_t fault_pfn;
 
 int is_error_page(struct page *page);
 int is_error_pfn(pfn_t pfn);
 int is_hwpoison_pfn(pfn_t pfn);
-int is_fault_pfn(pfn_t pfn);
 int is_noslot_pfn(pfn_t pfn);
 int is_invalid_pfn(pfn_t pfn);
 int kvm_is_error_hva(unsigned long addr);
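With is_error_hpa() gone and fault_pfn/bad_pfn no longer exported, the error encoding stays private to kvm_main.c and callers go through the predicates instead. A typical caller pattern (handle_gfn is a made-up name for illustration):

#include <linux/kvm_host.h>

/*
 * Illustrative only: callers never compare against bad_pfn or
 * fault_pfn directly; is_error_pfn() covers all error encodings.
 */
static int handle_gfn(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn = gfn_to_pfn(kvm, gfn);

        if (is_error_pfn(pfn))
                return -EFAULT;

        /* ... use pfn ... */
        kvm_release_pfn_clean(pfn);
        return 0;
}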
@@ -427,20 +430,20 @@ void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
-pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
+pfn_t hva_to_pfn_atomic(unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                        bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                       bool *writable);
-pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
-                        struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_release_pfn_dirty(pfn_t);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
 void kvm_get_pfn(pfn_t pfn);
+pfn_t get_fault_pfn(void);
 
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                         int len);
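Dropping the struct kvm * ("remove the unused parameter of gfn_to_pfn_memslot") pairs with "Add memslot parameter to hva handlers": the MMU-notifier handlers already hold a memslot, so they can resolve pfns without the VM pointer. A hypothetical handler body showing the new call shape:

#include <linux/kvm_host.h>

/*
 * Hypothetical handler body: the memslot is already in hand, so no
 * struct kvm * needs to be threaded through to the pfn lookup.
 */
static int age_one_gfn(struct kvm_memory_slot *slot, gfn_t gfn)
{
        pfn_t pfn = gfn_to_pfn_memslot(slot, gfn);      /* new signature */

        if (is_error_pfn(pfn))
                return 0;

        kvm_set_pfn_accessed(pfn);
        kvm_release_pfn_clean(pfn);
        return 1;
}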
@@ -740,6 +743,14 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
 }
 
+static inline gfn_t
+hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
+{
+       gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
+
+       return slot->base_gfn + gfn_offset;
+}
+
 static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
                                                gfn_t gfn)
 {
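hva_to_gfn_memslot() is the inverse of gfn_to_hva_memslot() just below it for addresses inside a slot's userspace mapping; it is what lets kvm_handle_hva_range() walk a span of hvas slot by slot. A quick illustrative round trip (relies on userspace_addr being page-aligned, which memslot registration enforces):

#include <linux/bug.h>
#include <linux/kvm_host.h>

/*
 * Illustrative round trip: for any hva inside the slot's userspace
 * mapping, hva -> gfn -> hva lands back on the containing page.
 */
static void check_round_trip(struct kvm_memory_slot *slot, unsigned long hva)
{
        gfn_t gfn = hva_to_gfn_memslot(hva, slot);

        BUG_ON(gfn_to_hva_memslot(slot, gfn) != (hva & PAGE_MASK));
}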
@@ -899,5 +910,32 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
        }
 }
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+       vcpu->spin_loop.in_spin_loop = val;
+}
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+       vcpu->spin_loop.dy_eligible = val;
+}
+
+#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+       return true;
+}
+
+#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
 
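Note the asymmetry in the stubs: only the config-off branch defines kvm_vcpu_eligible_for_directed_yield() in the header (degrading to "always eligible"); with the config on, the real check lives in kvm_main.c. The empty setters exist so common code can call them unconditionally, for example:

#include <linux/kvm_host.h>

/* Caller is illustrative: with the config off, both calls compile
 * away, so no #ifdef is needed at the call site. */
static void note_spin_enter(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_set_in_spin_loop(vcpu, true);
        kvm_vcpu_set_dy_eligible(vcpu, false);
}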