author     Avi Kivity <avi@redhat.com>    2012-07-26 04:54:21 -0400
committer  Avi Kivity <avi@redhat.com>    2012-07-26 04:54:21 -0400
commit     e9bda6f6f902e6b55d9baceb5523468a048cbe56 (patch)
tree       bf09cc165da1197cd34967da0593d08b9a37c0f3 /include
parent     bdc0077af574800d24318b6945cf2344e8dbb050 (diff)
parent     06e48c510aa37f6e791602e6420422ea7071fe94 (diff)
Merge branch 'queue' into next
Merge patches queued during the run-up to the merge window.

* queue: (25 commits)
  KVM: Choose better candidate for directed yield
  KVM: Note down when cpu relax intercepted or pause loop exited
  KVM: Add config to support ple or cpu relax optimzation
  KVM: switch to symbolic name for irq_states size
  KVM: x86: Fix typos in pmu.c
  KVM: x86: Fix typos in lapic.c
  KVM: x86: Fix typos in cpuid.c
  KVM: x86: Fix typos in emulate.c
  KVM: x86: Fix typos in x86.c
  KVM: SVM: Fix typos
  KVM: VMX: Fix typos
  KVM: remove the unused parameter of gfn_to_pfn_memslot
  KVM: remove is_error_hpa
  KVM: make bad_pfn static to kvm_main.c
  KVM: using get_fault_pfn to get the fault pfn
  KVM: MMU: track the refcount when unmap the page
  KVM: x86: remove unnecessary mark_page_dirty
  KVM: MMU: Avoid handling same rmap_pde in kvm_handle_hva_range()
  KVM: MMU: Push trace_kvm_age_page() into kvm_age_rmapp()
  KVM: MMU: Add memslot parameter to hva handlers
  ...

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'include')
-rw-r--r--  include/linux/kvm_host.h  62
1 file changed, 50 insertions(+), 12 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b70b48b01098..1993eb1cb2cd 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -183,6 +183,18 @@ struct kvm_vcpu {
 	} async_pf;
 #endif
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+	/*
+	 * Cpu relax intercept or pause loop exit optimization
+	 * in_spin_loop: set when a vcpu does a pause loop exit
+	 *  or cpu relax intercepted.
+	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
+	 */
+	struct {
+		bool in_spin_loop;
+		bool dy_eligible;
+	} spin_loop;
+#endif
 	struct kvm_vcpu_arch arch;
 };
 
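The spin_loop pair added here is the bookkeeping for directed yield: in_spin_loop marks a vcpu currently caught in a pause-loop exit or cpu-relax intercept, and dy_eligible gates whether it may be picked as a yield target. As a rough sketch over these two flags (the kernel's actual heuristic lives in virt/kvm/kvm_main.c, not in this header), a candidate check could read:

	/* Sketch only: prefer vcpus that were not themselves spinning,
	 * or that have been marked eligible since their last yield. */
	static bool directed_yield_candidate(struct kvm_vcpu *vcpu)
	{
		return !vcpu->spin_loop.in_spin_loop ||
		       vcpu->spin_loop.dy_eligible;
	}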
@@ -378,20 +390,11 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 	return slot;
 }
 
-#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
-#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
-static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
-
 extern struct page *bad_page;
-extern struct page *fault_page;
-
-extern pfn_t bad_pfn;
-extern pfn_t fault_pfn;
 
 int is_error_page(struct page *page);
 int is_error_pfn(pfn_t pfn);
 int is_hwpoison_pfn(pfn_t pfn);
-int is_fault_pfn(pfn_t pfn);
 int is_noslot_pfn(pfn_t pfn);
 int is_invalid_pfn(pfn_t pfn);
 int kvm_is_error_hva(unsigned long addr);
@@ -427,20 +430,20 @@ void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
-pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
+pfn_t hva_to_pfn_atomic(unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
 		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
-pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
-			 struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_release_pfn_dirty(pfn_t);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
 void kvm_get_pfn(pfn_t pfn);
+pfn_t get_fault_pfn(void);
 
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
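With the unused struct kvm parameter gone, gfn_to_pfn_memslot() now takes only the memslot and the gfn; the same series also drops the kvm argument from hva_to_pfn_atomic(). A hypothetical caller (variable names illustrative) changes like this:

	pfn_t pfn;

	pfn = gfn_to_pfn_memslot(kvm, slot, gfn);	/* before */
	pfn = gfn_to_pfn_memslot(slot, gfn);		/* after */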
@@ -740,6 +743,14 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 		 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
 }
 
+static inline gfn_t
+hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
+{
+	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
+
+	return slot->base_gfn + gfn_offset;
+}
+
 static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
 					       gfn_t gfn)
 {
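hva_to_gfn_memslot() is the inverse of the existing gfn_to_hva_memslot(): it subtracts the slot's userspace base address and shifts by PAGE_SHIFT to recover the page offset within the slot. With hypothetical slot values, the arithmetic works out as:

	/* Assume slot->userspace_addr = 0x7f0000000000, slot->base_gfn = 0x100,
	 * and PAGE_SHIFT = 12 (4 KiB pages). For hva = 0x7f0000003000:
	 *   gfn_offset = (0x7f0000003000 - 0x7f0000000000) >> 12 = 3
	 *   gfn        = 0x100 + 3 = 0x103
	 * and gfn_to_hva_memslot(slot, 0x103) maps back to 0x7f0000003000. */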
@@ -899,5 +910,32 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 	}
 }
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.in_spin_loop = val;
+}
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.dy_eligible = val;
+}
+
+#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+	return true;
+}
+
+#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
 
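The new accessors are meant to bracket the spin-loop handling: the yielding vcpu flags itself as spinning, tries to kick a candidate, then clears both flags so it is not treated as eligible during its next spin loop. A minimal sketch of that call pattern, assuming a vcpu pointer named me (the actual wiring is in kvm_vcpu_on_spin() in virt/kvm/kvm_main.c):

	kvm_vcpu_set_in_spin_loop(me, true);
	/* ... scan other vcpus and yield to an eligible candidate ... */
	kvm_vcpu_set_in_spin_loop(me, false);

	/* Ensure this vcpu is not considered eligible during its
	 * next spin loop. */
	kvm_vcpu_set_dy_eligible(me, false);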