Diffstat (limited to 'include/linux/kvm_host.h')

-rw-r--r--	include/linux/kvm_host.h	145
1 file changed, 116 insertions(+), 29 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8a59e0abe5fa..93bfc9f9815c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/rcupdate.h>
 #include <linux/ratelimit.h>
+#include <linux/err.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -35,6 +36,13 @@
 #endif
 
 /*
+ * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by kvm;
+ * the other bits are visible to userspace and are defined in
+ * include/linux/kvm.h.
+ */
+#define KVM_MEMSLOT_INVALID	(1UL << 16)
+
+/*
  * If we support unaligned MMIO, at most one fragment will be split into two:
  */
 #ifdef KVM_UNALIGNED_MMIO
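KVM_MEMSLOT_INVALID lives in the kernel-internal flag range described above. A minimal sketch of how such a flag could be consulted; memslot_is_usable() is a hypothetical helper name, not something this patch adds:

static inline bool memslot_is_usable(struct kvm_memory_slot *slot)
{
	/* slots being deleted or moved carry the internal invalid flag */
	return slot && !(slot->flags & KVM_MEMSLOT_INVALID);
}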
@@ -49,6 +57,47 @@
 	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
 
 /*
+ * For a normal pfn, the highest 12 bits should be zero,
+ * so we can mask those bits to indicate an error.
+ */
+#define KVM_PFN_ERR_MASK	(0xfffULL << 52)
+
+#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
+#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
+#define KVM_PFN_ERR_BAD		(KVM_PFN_ERR_MASK + 2)
+#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 3)
+
+static inline bool is_error_pfn(pfn_t pfn)
+{
+	return !!(pfn & KVM_PFN_ERR_MASK);
+}
+
+static inline bool is_noslot_pfn(pfn_t pfn)
+{
+	return pfn == KVM_PFN_ERR_BAD;
+}
+
+static inline bool is_invalid_pfn(pfn_t pfn)
+{
+	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
+}
+
+#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
+#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+	return addr >= PAGE_OFFSET;
+}
+
+#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))
+
+static inline bool is_error_page(struct page *page)
+{
+	return IS_ERR(page);
+}
+
+/*
  * vcpu->requests bit members
  */
 #define KVM_REQ_TLB_FLUSH	0
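With failures now encoded in the pfn value itself (rather than via the removed bad_page/bad_pfn globals), callers of gfn_to_pfn() can classify the result with the new inline helpers. A hedged sketch of that call pattern, not code taken from the patch:

static void pfn_check_sketch(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_noslot_pfn(pfn)) {
		/* gfn is not covered by any memslot: treat as MMIO/emulate */
	} else if (is_error_pfn(pfn)) {
		/* host-side failure: fault, hwpoison or read-only violation */
	} else {
		/* usable pfn; drop the reference once done with it */
		kvm_release_pfn_clean(pfn);
	}
}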
@@ -70,7 +119,8 @@
 #define KVM_REQ_PMU		16
 #define KVM_REQ_PMI		17
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
+#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
 
 struct kvm;
 struct kvm_vcpu;
@@ -183,6 +233,18 @@ struct kvm_vcpu {
 	} async_pf;
 #endif
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+	/*
+	 * CPU-relax-intercept / pause-loop-exit optimization.
+	 * in_spin_loop: set when a vcpu takes a pause-loop exit or its
+	 * cpu-relax instruction is intercepted.
+	 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
+	 */
+	struct {
+		bool in_spin_loop;
+		bool dy_eligible;
+	} spin_loop;
+#endif
 	struct kvm_vcpu_arch arch;
 };
 
@@ -201,7 +263,6 @@ struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long flags;
-	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
 	struct kvm_arch_memory_slot arch;
 	unsigned long userspace_addr;
@@ -283,6 +344,8 @@ struct kvm {
 	struct {
 		spinlock_t        lock;
 		struct list_head  items;
+		struct list_head  resampler_list;
+		struct mutex      resampler_lock;
 	} irqfds;
 	struct list_head ioeventfds;
 #endif
@@ -348,7 +411,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
-void vcpu_load(struct kvm_vcpu *vcpu);
+int __must_check vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 
 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
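vcpu_load() now returns int and is marked __must_check, so callers are expected to propagate a failure instead of ignoring it. A hedged sketch of the resulting pattern (the surrounding ioctl handler is assumed, not part of this hunk):

static long vcpu_ioctl_sketch(struct kvm_vcpu *vcpu)
{
	int r = vcpu_load(vcpu);

	if (r)
		return r;
	/* ... operate on the loaded vcpu ... */
	vcpu_put(vcpu);
	return 0;
}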
@@ -378,23 +441,6 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 	return slot;
 }
 
-#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
-#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
-static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
-
-extern struct page *bad_page;
-extern struct page *fault_page;
-
-extern pfn_t bad_pfn;
-extern pfn_t fault_pfn;
-
-int is_error_page(struct page *page);
-int is_error_pfn(pfn_t pfn);
-int is_hwpoison_pfn(pfn_t pfn);
-int is_fault_pfn(pfn_t pfn);
-int is_noslot_pfn(pfn_t pfn);
-int is_invalid_pfn(pfn_t pfn);
-int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
 			  int user_alloc);
@@ -415,28 +461,33 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   int user_alloc);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
-void kvm_arch_flush_shadow(struct kvm *kvm);
+/* flush all memory translations */
+void kvm_arch_flush_shadow_all(struct kvm *kvm);
+/* flush memory translations pointing to 'slot' */
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+				   struct kvm_memory_slot *slot);
 
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 			    int nr_pages);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
-pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
 		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
-pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
-			 struct kvm_memory_slot *slot, gfn_t gfn);
-void kvm_release_pfn_dirty(pfn_t);
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
+
+void kvm_release_pfn_dirty(pfn_t pfn);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
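gfn_to_pfn_memslot() now takes the memslot directly, and gfn_to_hva_memslot() is exported alongside it. A hedged sketch of resolving a gfn once a slot has been looked up; the helper name and flow are illustrative only:

static pfn_t slot_pfn_sketch(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	if (!slot || (slot->flags & KVM_MEMSLOT_INVALID))
		return KVM_PFN_ERR_BAD;
	if (kvm_is_error_hva(gfn_to_hva_memslot(slot, gfn)))
		return KVM_PFN_ERR_BAD;

	return gfn_to_pfn_memslot(slot, gfn);
}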
@@ -494,6 +545,7 @@ int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 				   struct
 				   kvm_userspace_memory_region *mem,
 				   int user_alloc);
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
 
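kvm_vm_ioctl_irq_line() backs the KVM_IRQ_LINE vm ioctl. A hedged sketch of the userspace side; vm_fd (from KVM_CREATE_VM) and gsi are assumed to exist:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int assert_irq_line_sketch(int vm_fd, unsigned int gsi)
{
	struct kvm_irq_level irq_level = {
		.irq   = gsi,
		.level = 1,		/* 1 = assert, 0 = deassert */
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
}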
@@ -573,7 +625,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
-int kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_mmio_pfn(pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
@@ -728,6 +780,12 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
 	return search_memslots(slots, gfn);
 }
 
+static inline unsigned long
+__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+}
+
 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn_to_memslot(kvm, gfn)->id;
@@ -740,10 +798,12 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
 }
 
-static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
-					       gfn_t gfn)
+static inline gfn_t
+hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
 {
-	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
+
+	return slot->base_gfn + gfn_offset;
 }
 
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
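__gfn_to_hva_memslot() and the new hva_to_gfn_memslot() are inverse linear mappings over a slot. A hedged sketch of the round trip, assuming gfn lies inside the slot:

static inline bool gfn_hva_round_trip_sketch(struct kvm_memory_slot *slot,
					     gfn_t gfn)
{
	unsigned long hva = __gfn_to_hva_memslot(slot, gfn);

	/* the page-aligned hva maps back to the gfn it came from */
	return hva_to_gfn_memslot(hva, slot) == gfn;
}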
@@ -899,5 +959,32 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 	}
 }
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.in_spin_loop = val;
+}
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.dy_eligible = val;
+}
+
+#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+	return true;
+}
+
+#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
 
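A hedged sketch, not part of this patch, of how a directed-yield pass in kvm_main.c could consult the new eligibility helper; kvm_for_each_vcpu() and kvm_vcpu_yield_to() are existing KVM facilities assumed here:

static void directed_yield_sketch(struct kvm *kvm, struct kvm_vcpu *me)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == me || !kvm_vcpu_eligible_for_directed_yield(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu))
			break;
	}
}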