Diffstat (limited to 'include/linux/kvm_host.h')
 include/linux/kvm_host.h | 119 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 86 insertions(+), 33 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bd5a616d9373..c13cc48697aa 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -38,6 +38,7 @@
 #define KVM_REQ_MMU_SYNC           7
 #define KVM_REQ_KVMCLOCK_UPDATE    8
 #define KVM_REQ_KICK               9
+#define KVM_REQ_DEACTIVATE_FPU    10
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID	0
 
@@ -53,24 +54,24 @@ extern struct kmem_cache *kvm_vcpu_cache;
  */
 struct kvm_io_bus {
 	int dev_count;
-#define NR_IOBUS_DEVS 6
+#define NR_IOBUS_DEVS 200
 	struct kvm_io_device *devs[NR_IOBUS_DEVS];
 };
 
-void kvm_io_bus_init(struct kvm_io_bus *bus);
-void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
-		     const void *val);
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
+enum kvm_bus {
+	KVM_MMIO_BUS,
+	KVM_PIO_BUS,
+	KVM_NR_BUSES
+};
+
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+		     int len, const void *val);
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
 		    void *val);
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
-			      struct kvm_io_device *dev);
-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 			    struct kvm_io_device *dev);
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
-				 struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
-			       struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			      struct kvm_io_device *dev);
 
 struct kvm_vcpu {
 	struct kvm *kvm;
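The io_bus API above replaces direct struct kvm_io_bus pointers with a per-VM bus index; kvm->buses[] (added further down) holds one kvm_io_bus per enum kvm_bus value. A minimal sketch of a caller, assuming a kvm_io_device whose ops are already set up (the device, address, and value are illustrative, not from this patch):

static struct kvm_io_device example_dev;	/* ops assumed initialized */

static int example_attach(struct kvm *kvm)
{
	/* May fail (e.g. -ENOSPC) once NR_IOBUS_DEVS devices exist. */
	return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &example_dev);
}

static int example_mmio_store(struct kvm *kvm, gpa_t addr)
{
	u32 val = 1;

	/* Dispatches to the first registered device that claims
	 * (addr, len) on the MMIO bus; negative if none does. */
	return kvm_io_bus_write(kvm, KVM_MMIO_BUS, addr, sizeof(val), &val);
}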
@@ -80,11 +81,14 @@ struct kvm_vcpu {
 	int vcpu_id;
 	struct mutex mutex;
 	int cpu;
+	atomic_t guest_mode;
 	struct kvm_run *run;
 	unsigned long requests;
 	unsigned long guest_debug;
+	int srcu_idx;
+
 	int fpu_active;
-	int guest_fpu_loaded;
+	int guest_fpu_loaded, guest_xcr0_loaded;
 	wait_queue_head_t wq;
 	int sigset_active;
 	sigset_t sigset;
@@ -102,6 +106,12 @@ struct kvm_vcpu {
 	struct kvm_vcpu_arch arch;
 };
 
+/*
+ * Some of the bitops functions do not support too long bitmaps.
+ * This number must be determined not to exceed such limits.
+ */
+#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
+
 struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
@@ -114,8 +124,14 @@ struct kvm_memory_slot {
 	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
 	unsigned long userspace_addr;
 	int user_alloc;
+	int id;
 };
 
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
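kvm_dirty_bitmap_bytes() rounds the slot's page count up to a whole number of unsigned longs before converting bits to bytes, so callers can size and walk the dirty bitmap one long at a time. A hedged sketch of the typical allocation (assuming the existing dirty_bitmap field of kvm_memory_slot; the function name is illustrative):

static int example_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	/* e.g. 100 pages: ALIGN(100, 64) / 8 = 128 / 8 = 16 bytes */
	unsigned long bytes = kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = vmalloc(bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;
	memset(memslot->dirty_bitmap, 0, bytes);
	return 0;
}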
@@ -150,14 +166,19 @@ struct kvm_irq_routing_table {};
 
 #endif
 
-struct kvm {
-	spinlock_t mmu_lock;
-	spinlock_t requests_lock;
-	struct rw_semaphore slots_lock;
-	struct mm_struct *mm; /* userspace tied to this vm */
+struct kvm_memslots {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
+};
+
+struct kvm {
+	spinlock_t mmu_lock;
+	raw_spinlock_t requests_lock;
+	struct mutex slots_lock;
+	struct mm_struct *mm; /* userspace tied to this vm */
+	struct kvm_memslots *memslots;
+	struct srcu_struct srcu;
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
 	u32 bsp_vcpu_id;
 	struct kvm_vcpu *bsp_vcpu;
@@ -166,8 +187,7 @@ struct kvm {
 	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct mutex lock;
-	struct kvm_io_bus mmio_bus;
-	struct kvm_io_bus pio_bus;
+	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
 	struct {
 		spinlock_t lock;
@@ -225,23 +245,31 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 void vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 
-int kvm_init(void *opaque, unsigned int vcpu_size,
+int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	     struct module *module);
 void kvm_exit(void);
 
 void kvm_get_kvm(struct kvm *kvm);
 void kvm_put_kvm(struct kvm *kvm);
 
+static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+{
+	return rcu_dereference_check(kvm->memslots,
+			srcu_read_lock_held(&kvm->srcu)
+			|| lockdep_is_held(&kvm->slots_lock));
+}
+
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
 #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
-struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
 
 extern struct page *bad_page;
 extern pfn_t bad_pfn;
 
 int is_error_page(struct page *page);
 int is_error_pfn(pfn_t pfn);
+int is_hwpoison_pfn(pfn_t pfn);
+int is_fault_pfn(pfn_t pfn);
 int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
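kvm_memslots() encodes its locking contract in the rcu_dereference_check() condition: callers must either be inside an SRCU read-side section on kvm->srcu or hold kvm->slots_lock. A sketch of the reader pattern (the linear lookup is illustrative; gfn_to_memslot() is the real helper):

static unsigned long example_gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	int i, idx;
	unsigned long hva = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	idx = srcu_read_lock(&kvm->srcu);	/* pin current memslots */
	slots = kvm_memslots(kvm);
	for (i = 0; i < slots->nmemslots; i++) {
		slot = &slots->memslots[i];
		if (gfn >= slot->base_gfn &&
		    gfn < slot->base_gfn + slot->npages) {
			hva = slot->userspace_addr +
			      (gfn - slot->base_gfn) * PAGE_SIZE;
			break;
		}
	}
	srcu_read_unlock(&kvm->srcu, idx);
	return hva;
}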
@@ -249,13 +277,18 @@ int kvm_set_memory_region(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
 			    int user_alloc);
-int kvm_arch_set_memory_region(struct kvm *kvm,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				struct kvm_memory_slot old,
+				struct kvm_userspace_memory_region *mem,
+				int user_alloc);
+void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
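The old single kvm_arch_set_memory_region() hook is split in two: a prepare step that may fail before anything is published, and a commit step that runs after the new memslots are visible. A hedged sketch of how generic code is expected to pair them (the function name and the elided publish step are assumptions based on the new prototypes):

static int example_set_region(struct kvm *kvm,
			      struct kvm_userspace_memory_region *mem,
			      struct kvm_memory_slot *new,
			      struct kvm_memory_slot old, int user_alloc)
{
	int r;

	/* Phase 1: may fail; readers still see the old memslots. */
	r = kvm_arch_prepare_memory_region(kvm, new, old, mem, user_alloc);
	if (r)
		return r;

	/* Generic code would publish the new kvm_memslots here,
	 * e.g. rcu_assign_pointer() + synchronize_srcu() (elided). */

	/* Phase 2: must not fail; arch updates shadow state etc. */
	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
	return 0;
}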
@@ -264,6 +297,9 @@ void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
+			 struct kvm_memory_slot *slot, gfn_t gfn);
+int memslot_id(struct kvm *kvm, gfn_t gfn);
 void kvm_release_pfn_dirty(pfn_t);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
@@ -283,6 +319,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
@@ -383,6 +420,7 @@ struct kvm_assigned_dev_kernel {
 	struct work_struct interrupt_work;
 	struct list_head list;
 	int assigned_dev_id;
+	int host_segnr;
 	int host_busnr;
 	int host_devfn;
 	unsigned int entries_nr;
@@ -409,7 +447,8 @@ void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
 				    struct kvm_irq_mask_notifier *kimn);
 void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn);
-void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
+void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+			     bool mask);
 
 #ifdef __KVM_HAVE_IOAPIC
 void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
@@ -429,8 +468,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 #define KVM_IOMMU_CACHE_COHERENCY	0x1
 
 #ifdef CONFIG_IOMMU_API
-int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
-			unsigned long npages);
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
@@ -480,11 +518,6 @@ static inline void kvm_guest_exit(void)
 	current->flags &= ~PF_VCPU;
 }
 
-static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
-	return slot - kvm->memslots;
-}
-
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
 {
 	return (gpa_t)gfn << PAGE_SHIFT;
@@ -594,5 +627,25 @@ static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
 
 #endif
 
+static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+	set_bit(req, &vcpu->requests);
+}
+
+static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
+{
+	return test_and_set_bit(req, &vcpu->requests);
+}
+
+static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
+{
+	if (test_bit(req, &vcpu->requests)) {
+		clear_bit(req, &vcpu->requests);
+		return true;
+	} else {
+		return false;
+	}
+}
+
 #endif
 
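The request helpers added at the end wrap the vcpu->requests bitmap with the usual set_bit()/test_and_set_bit()/clear_bit() primitives. A usage sketch of the producer/consumer pattern (the handler body is illustrative):

static void example_post_request(struct kvm_vcpu *vcpu)
{
	/* Producer side, possibly on another CPU: mark work pending;
	 * usually followed by a kick so the vcpu notices promptly. */
	kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu);
}

static void example_service_requests(struct kvm_vcpu *vcpu)
{
	/* Consumer side, in the vcpu loop: test-and-clear, so each
	 * posted request is handled at most once. */
	if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu)) {
		/* refresh the guest's clock page here (illustrative) */
	}
}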