Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r--  include/linux/kvm_host.h  145
 1 file changed, 116 insertions(+), 29 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8a59e0abe5fa..93bfc9f9815c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/rcupdate.h>
 #include <linux/ratelimit.h>
+#include <linux/err.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -35,6 +36,13 @@
 #endif
 
 /*
+ * Bits 16 ~ 31 of kvm_memory_region::flags are internally used in
+ * kvm; the other bits are visible to userspace and are defined in
+ * include/linux/kvm.h.
+ */
+#define KVM_MEMSLOT_INVALID	(1UL << 16)
+
+/*
  * If we support unaligned MMIO, at most one fragment will be split into two:
  */
 #ifdef KVM_UNALIGNED_MMIO
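
The new comment partitions kvm_memory_region::flags into a userspace-visible half (bits 0 ~ 15) and a kvm-internal half (bits 16 ~ 31), with KVM_MEMSLOT_INVALID taking bit 16. A minimal stand-alone sketch of that split; the SKETCH_* mask names are illustrative stand-ins, not kernel definitions:

/* Stand-alone sketch of the flag split described in the comment above. */
#include <stdio.h>

#define SKETCH_USER_FLAGS_MASK	0x0000ffffUL	/* bits 0-15: visible to userspace */
#define SKETCH_INTERNAL_MASK	0xffff0000UL	/* bits 16-31: internal to kvm */
#define SKETCH_MEMSLOT_INVALID	(1UL << 16)	/* mirrors KVM_MEMSLOT_INVALID */

int main(void)
{
	unsigned long flags = SKETCH_MEMSLOT_INVALID | 0x1UL;

	printf("user-visible bits:   0x%lx\n", flags & SKETCH_USER_FLAGS_MASK);
	printf("kvm-internal bits:   0x%lx\n", flags & SKETCH_INTERNAL_MASK);
	printf("slot marked invalid: %d\n", (flags & SKETCH_MEMSLOT_INVALID) != 0);
	return 0;
}
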
@@ -49,6 +57,47 @@
 	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
 
 /*
+ * For the normal pfn, the highest 12 bits should be zero,
+ * so we can mask these bits to indicate the error.
+ */
+#define KVM_PFN_ERR_MASK	(0xfffULL << 52)
+
+#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
+#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
+#define KVM_PFN_ERR_BAD		(KVM_PFN_ERR_MASK + 2)
+#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 3)
+
+static inline bool is_error_pfn(pfn_t pfn)
+{
+	return !!(pfn & KVM_PFN_ERR_MASK);
+}
+
+static inline bool is_noslot_pfn(pfn_t pfn)
+{
+	return pfn == KVM_PFN_ERR_BAD;
+}
+
+static inline bool is_invalid_pfn(pfn_t pfn)
+{
+	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
+}
+
+#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
+#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+	return addr >= PAGE_OFFSET;
+}
+
+#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))
+
+static inline bool is_error_page(struct page *page)
+{
+	return IS_ERR(page);
+}
+
+/*
  * vcpu->requests bit members
  */
 #define KVM_REQ_TLB_FLUSH          0
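
The error-pfn scheme above reserves the top 12 bits of a pfn: any value with those bits set is an error indicator, and KVM_PFN_ERR_BAD specifically means "no memslot". A stand-alone sketch of the same encoding and checks; the pfn_t typedef and SKETCH_* names are local stand-ins for the kernel types:

/* Stand-alone sketch of the error-pfn encoding added above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pfn_t;

#define SKETCH_PFN_ERR_MASK	(0xfffULL << 52)
#define SKETCH_PFN_ERR_FAULT	(SKETCH_PFN_ERR_MASK)
#define SKETCH_PFN_ERR_BAD	(SKETCH_PFN_ERR_MASK + 2)

static bool is_error_pfn(pfn_t pfn)   { return (pfn & SKETCH_PFN_ERR_MASK) != 0; }
static bool is_noslot_pfn(pfn_t pfn)  { return pfn == SKETCH_PFN_ERR_BAD; }
static bool is_invalid_pfn(pfn_t pfn) { return !is_noslot_pfn(pfn) && is_error_pfn(pfn); }

int main(void)
{
	pfn_t normal = 0x12345;			/* top 12 bits clear: a real pfn */
	pfn_t noslot = SKETCH_PFN_ERR_BAD;	/* no memslot behind the gfn */
	pfn_t fault  = SKETCH_PFN_ERR_FAULT;	/* backed by a slot, but faulted */

	printf("normal: err=%d noslot=%d invalid=%d\n",
	       is_error_pfn(normal), is_noslot_pfn(normal), is_invalid_pfn(normal));
	printf("noslot: err=%d noslot=%d invalid=%d\n",
	       is_error_pfn(noslot), is_noslot_pfn(noslot), is_invalid_pfn(noslot));
	printf("fault:  err=%d noslot=%d invalid=%d\n",
	       is_error_pfn(fault), is_noslot_pfn(fault), is_invalid_pfn(fault));
	return 0;
}
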
@@ -70,7 +119,8 @@
 #define KVM_REQ_PMU               16
 #define KVM_REQ_PMI               17
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
+#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
 
 struct kvm;
 struct kvm_vcpu;
@@ -183,6 +233,18 @@ struct kvm_vcpu {
 	} async_pf;
 #endif
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+	/*
+	 * CPU relax intercept or pause loop exit optimization
+	 * in_spin_loop: set when a vcpu does a pause loop exit
+	 *  or a cpu relax intercept.
+	 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
+	 */
+	struct {
+		bool in_spin_loop;
+		bool dy_eligible;
+	} spin_loop;
+#endif
 	struct kvm_vcpu_arch arch;
 };
 
@@ -201,7 +263,6 @@ struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long flags;
-	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
 	struct kvm_arch_memory_slot arch;
 	unsigned long userspace_addr;
@@ -283,6 +344,8 @@ struct kvm {
 	struct {
 		spinlock_t        lock;
 		struct list_head  items;
+		struct list_head  resampler_list;
+		struct mutex      resampler_lock;
 	} irqfds;
 	struct list_head ioeventfds;
 #endif
@@ -348,7 +411,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
-void vcpu_load(struct kvm_vcpu *vcpu);
+int __must_check vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 
 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
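
With vcpu_load() now returning int and marked __must_check, callers are expected to propagate a failure rather than ignore it. A stand-alone sketch of that call pattern, assuming a locking-style failure; vcpu_load()/vcpu_put() are stubbed locally and do not reflect a specific kernel caller:

/* Stand-alone sketch of the __must_check vcpu_load() pattern. */
#include <stdio.h>

struct kvm_vcpu { int id; };

static int vcpu_load(struct kvm_vcpu *vcpu)  { (void)vcpu; return 0; }	/* or a -EINTR-style failure */
static void vcpu_put(struct kvm_vcpu *vcpu)  { (void)vcpu; }

static int do_vcpu_op(struct kvm_vcpu *vcpu)
{
	int r = vcpu_load(vcpu);	/* return value must be checked */

	if (r)
		return r;
	/* ... operate on the loaded vcpu ... */
	vcpu_put(vcpu);
	return 0;
}

int main(void)
{
	struct kvm_vcpu vcpu = { .id = 0 };

	printf("vcpu op sketch returned %d\n", do_vcpu_op(&vcpu));
	return 0;
}
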
@@ -378,23 +441,6 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 	return slot;
 }
 
-#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
-#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
-static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
-
-extern struct page *bad_page;
-extern struct page *fault_page;
-
-extern pfn_t bad_pfn;
-extern pfn_t fault_pfn;
-
-int is_error_page(struct page *page);
-int is_error_pfn(pfn_t pfn);
-int is_hwpoison_pfn(pfn_t pfn);
-int is_fault_pfn(pfn_t pfn);
-int is_noslot_pfn(pfn_t pfn);
-int is_invalid_pfn(pfn_t pfn);
-int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
 			  int user_alloc);
@@ -415,28 +461,33 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				int user_alloc);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
-void kvm_arch_flush_shadow(struct kvm *kvm);
+/* flush all memory translations */
+void kvm_arch_flush_shadow_all(struct kvm *kvm);
+/* flush memory translations pointing to 'slot' */
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+				   struct kvm_memory_slot *slot);
 
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 			    int nr_pages);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
-pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
 		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
-pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
-			 struct kvm_memory_slot *slot, gfn_t gfn);
-void kvm_release_pfn_dirty(pfn_t);
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
+
+void kvm_release_pfn_dirty(pfn_t pfn);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
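
The prototypes above change gfn_to_pfn_memslot() to take the memslot directly and add an atomic variant, but callers still follow the same translate / check-for-error / release pattern. A stand-alone sketch of that pattern; all functions here are local stubs (gfn_to_pfn() just fabricates a value for illustration), not the kernel ones:

/* Stand-alone sketch of the translate/check/release pattern around the pfn API. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t pfn_t;

#define SKETCH_PFN_ERR_MASK	(0xfffULL << 52)

static bool is_error_pfn(pfn_t pfn) { return (pfn & SKETCH_PFN_ERR_MASK) != 0; }

/* Stub: pretend gfns below 0x1000 are backed, everything else faults. */
static pfn_t gfn_to_pfn(gfn_t gfn)
{
	return gfn < 0x1000 ? gfn + 0x100000 : SKETCH_PFN_ERR_MASK;
}

static void kvm_release_pfn_clean(pfn_t pfn) { (void)pfn; }

static int map_one_gfn(gfn_t gfn)
{
	pfn_t pfn = gfn_to_pfn(gfn);

	if (is_error_pfn(pfn))
		return -1;		/* no usable page behind this gfn */
	printf("gfn 0x%llx -> pfn 0x%llx\n",
	       (unsigned long long)gfn, (unsigned long long)pfn);
	kvm_release_pfn_clean(pfn);	/* drop the reference when done */
	return 0;
}

int main(void)
{
	map_one_gfn(0x42);
	map_one_gfn(0x2000);
	return 0;
}
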
@@ -494,6 +545,7 @@ int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 				   struct
 				   kvm_userspace_memory_region *mem,
 				   int user_alloc);
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
 
@@ -573,7 +625,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
-int kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_mmio_pfn(pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
@@ -728,6 +780,12 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
 	return search_memslots(slots, gfn);
 }
 
+static inline unsigned long
+__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+}
+
 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn_to_memslot(kvm, gfn)->id;
@@ -740,10 +798,12 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
 }
 
-static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
-					       gfn_t gfn)
+static inline gfn_t
+hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
 {
-	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
+
+	return slot->base_gfn + gfn_offset;
 }
 
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
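
__gfn_to_hva_memslot() and the new hva_to_gfn_memslot() are inverses built from the same arithmetic: an hva is the slot's userspace_addr plus the page offset of the gfn within the slot. A stand-alone sketch of both directions, assuming 4 KiB pages and a minimal local memslot struct in place of the kernel's:

/* Stand-alone sketch of the gfn<->hva arithmetic above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

typedef uint64_t gfn_t;

struct sketch_memslot {
	gfn_t base_gfn;
	unsigned long userspace_addr;
};

static unsigned long gfn_to_hva_memslot(struct sketch_memslot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static gfn_t hva_to_gfn_memslot(unsigned long hva, struct sketch_memslot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

int main(void)
{
	struct sketch_memslot slot = { .base_gfn = 0x100, .userspace_addr = 0x7f0000000000UL };
	gfn_t gfn = 0x105;
	unsigned long hva = gfn_to_hva_memslot(&slot, gfn);

	/* The round trip recovers the original gfn. */
	printf("gfn 0x%llx -> hva 0x%lx -> gfn 0x%llx\n",
	       (unsigned long long)gfn, hva,
	       (unsigned long long)hva_to_gfn_memslot(hva, &slot));
	return 0;
}
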
@@ -899,5 +959,32 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 	}
 }
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.in_spin_loop = val;
+}
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.dy_eligible = val;
+}
+
+#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+	return true;
+}
+
+#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
 
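
The spin_loop flags added to struct kvm_vcpu and the setters above feed the directed-yield bookkeeping: a vcpu that pause-loop-exits marks itself in_spin_loop, and dy_eligible controls whether it may be chosen as a yield target. A stand-alone sketch of that bookkeeping; struct kvm_vcpu and the way the flags are consumed are simplified stand-ins, not the kernel's scheduler logic:

/* Stand-alone sketch of the spin_loop bookkeeping above. */
#include <stdbool.h>
#include <stdio.h>

struct kvm_vcpu {
	int id;
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
};

static void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}

static void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

int main(void)
{
	struct kvm_vcpu vcpus[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	/* vcpu 1 pause-loop-exits; vcpu 2 is a plausible yield target. */
	kvm_vcpu_set_in_spin_loop(&vcpus[1], true);
	kvm_vcpu_set_dy_eligible(&vcpus[2], true);

	for (int i = 0; i < 3; i++)
		printf("vcpu %d: in_spin_loop=%d dy_eligible=%d\n",
		       vcpus[i].id,
		       vcpus[i].spin_loop.in_spin_loop,
		       vcpus[i].spin_loop.dy_eligible);
	return 0;
}
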