path: root/include/linux/kvm_host.h
Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r--  include/linux/kvm_host.h  55
1 file changed, 46 insertions(+), 9 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 72cbf08d45fb..c4464356b35b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -35,6 +35,20 @@
 #endif
 
 /*
+ * If we support unaligned MMIO, at most one fragment will be split into two:
+ */
+#ifdef KVM_UNALIGNED_MMIO
+# define KVM_EXTRA_MMIO_FRAGMENTS 1
+#else
+# define KVM_EXTRA_MMIO_FRAGMENTS 0
+#endif
+
+#define KVM_USER_MMIO_SIZE 8
+
+#define KVM_MAX_MMIO_FRAGMENTS \
+	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
+
+/*
  * vcpu->requests bit members
  */
 #define KVM_REQ_TLB_FLUSH          0
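
The arithmetic behind the new bound: a guest access of up to KVM_MMIO_SIZE bytes is handed to userspace in pieces of at most KVM_USER_MMIO_SIZE (8) bytes, the size of the mmio data buffer in the kvm_run exit, so it needs KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE fragments, plus one extra when KVM_UNALIGNED_MMIO allows a single fragment to be split across a page boundary. Assuming an architecture where KVM_MMIO_SIZE is 8, that works out to 8 / 8 + 1 = 2 fragments at most; without KVM_UNALIGNED_MMIO it stays at 1.
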
@@ -68,10 +82,11 @@ struct kvm_io_range {
 	struct kvm_io_device *dev;
 };
 
+#define NR_IOBUS_DEVS 1000
+
 struct kvm_io_bus {
 	int dev_count;
-#define NR_IOBUS_DEVS 300
-	struct kvm_io_range range[NR_IOBUS_DEVS];
+	struct kvm_io_range range[];
 };
 
 enum kvm_bus {
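
struct kvm_io_bus now ends in a C99 flexible array member instead of a fixed NR_IOBUS_DEVS-sized array, so a bus can be sized to the number of registered devices and reallocated as devices are added; NR_IOBUS_DEVS (raised to 1000) remains only as an upper limit. A minimal sketch of how such a bus would be allocated, assuming the usual flexible-array idiom (the helper name is illustrative; the real registration path lives in virt/kvm/kvm_main.c and may differ in detail):

/* Sketch only: size a kvm_io_bus for 'count' ranges using the
 * flexible-array idiom. */
struct kvm_io_bus *example_alloc_bus(int count)
{
	struct kvm_io_bus *bus;

	if (count > NR_IOBUS_DEVS)
		return NULL;		/* still bounded by NR_IOBUS_DEVS */
	bus = kzalloc(sizeof(*bus) + count * sizeof(struct kvm_io_range),
		      GFP_KERNEL);
	if (bus)
		bus->dev_count = count;
	return bus;
}
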
@@ -113,7 +128,18 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 enum {
 	OUTSIDE_GUEST_MODE,
 	IN_GUEST_MODE,
-	EXITING_GUEST_MODE
+	EXITING_GUEST_MODE,
+	READING_SHADOW_PAGE_TABLES,
+};
+
+/*
+ * Sometimes a large or cross-page mmio needs to be broken up into separate
+ * exits for userspace servicing.
+ */
+struct kvm_mmio_fragment {
+	gpa_t gpa;
+	void *data;
+	unsigned len;
+};
 
 struct kvm_vcpu {
@@ -143,10 +169,9 @@ struct kvm_vcpu {
 	int mmio_needed;
 	int mmio_read_completed;
 	int mmio_is_write;
-	int mmio_size;
-	int mmio_index;
-	unsigned char mmio_data[KVM_MMIO_SIZE];
-	gpa_t mmio_phys_addr;
+	int mmio_cur_fragment;
+	int mmio_nr_fragments;
+	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
 #endif
 
 #ifdef CONFIG_KVM_ASYNC_PF
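
Together with struct kvm_mmio_fragment above, these fields replace the single (gpa, size, index, data[]) tuple: an emulated access is pre-split into up to KVM_MAX_MMIO_FRAGMENTS fragments, and mmio_cur_fragment tracks which one the next userspace exit services. A hedged sketch of the splitting idea, assuming a hypothetical helper (the actual emulation code sits in arch code, e.g. arch/x86/kvm/x86.c, and is not shown in this header):

/* Sketch only (hypothetical helper, not from the patch): split a guest
 * access at 'gpa' of 'len' bytes into fragments no larger than
 * KVM_USER_MMIO_SIZE and not crossing a page boundary. */
static int example_setup_fragments(struct kvm_vcpu *vcpu, gpa_t gpa,
				   void *data, unsigned len)
{
	vcpu->mmio_nr_fragments = 0;
	vcpu->mmio_cur_fragment = 0;

	while (len) {
		struct kvm_mmio_fragment *frag;
		unsigned now = min_t(unsigned, len, KVM_USER_MMIO_SIZE);

		/* keep each fragment within one page */
		now = min_t(unsigned, now, PAGE_SIZE - offset_in_page(gpa));

		if (vcpu->mmio_nr_fragments == KVM_MAX_MMIO_FRAGMENTS)
			return -E2BIG;

		frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
		frag->gpa  = gpa;
		frag->data = data;
		frag->len  = now;

		gpa  += now;
		data += now;
		len  -= now;
	}
	return 0;
}
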
@@ -178,8 +203,6 @@ struct kvm_memory_slot {
 	unsigned long flags;
 	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
-	unsigned long *dirty_bitmap_head;
-	unsigned long nr_dirty_pages;
 	struct kvm_arch_memory_slot arch;
 	unsigned long userspace_addr;
 	int user_alloc;
@@ -438,6 +461,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			       gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
@@ -506,6 +531,7 @@ int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
 void kvm_free_physmem(struct kvm *kvm);
 
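These declarations back the cross-vCPU signalling paths: kvm_vcpu_kick() forces a vCPU out of guest mode (typically via IPI) so it notices work queued for it, kvm_arch_vcpu_should_kick() lets the architecture decide whether that IPI is actually needed for the vCPU's current mode, and kvm_vcpu_yield_to() directs a yield at a specific target vCPU, returning whether it took effect. A minimal sketch of the usual caller pattern, assuming the existing kvm_make_request() helper from this header (the function name and choice of request bit are illustrative):

/* Sketch only: queue work for a vCPU, then kick it so that a vCPU
 * currently running in guest mode exits and processes the request. */
static void example_request_and_kick(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_vcpu_kick(vcpu);
}
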
@@ -521,6 +547,15 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 }
 #endif
 
+static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+{
+#ifdef __KVM_HAVE_ARCH_WQP
+	return vcpu->arch.wqp;
+#else
+	return &vcpu->wq;
+#endif
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
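
The new kvm_arch_vcpu_wq() accessor lets architectures that define __KVM_HAVE_ARCH_WQP keep the vCPU wait queue in per-vCPU arch state, while everyone else keeps using vcpu->wq; generic code can then wait on or wake the right queue without #ifdefs. A hedged usage sketch (the function name is illustrative; the real blocking and wakeup paths live in virt/kvm/kvm_main.c):

/* Sketch only: wake whatever queue the architecture says this vCPU
 * sleeps on. */
static void example_wake_vcpu(struct kvm_vcpu *vcpu)
{
	wait_queue_head_t *wqp = kvm_arch_vcpu_wq(vcpu);

	if (waitqueue_active(wqp))
		wake_up_interruptible(wqp);
}
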
@@ -769,6 +804,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
 			unsigned flags);
 void kvm_free_irq_routing(struct kvm *kvm);
 
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+
 #else
 
 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
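
kvm_send_userspace_msi() is the in-kernel entry point for injecting an MSI that userspace describes in a struct kvm_msi, and it is declared only in the branch of this header where irq routing support is available (hence its placement before the #else). A hedged sketch of how a VM ioctl handler might call it, assuming the kvm_msi payload is copied straight from the user (the actual dispatch lives in the KVM ioctl code and is not part of this header):

/* Sketch only: copy the user's kvm_msi and hand it to
 * kvm_send_userspace_msi(); return-value conventions are illustrative. */
static long example_signal_msi(struct kvm *kvm, void __user *argp)
{
	struct kvm_msi msi;

	if (copy_from_user(&msi, argp, sizeof(msi)))
		return -EFAULT;
	return kvm_send_userspace_msi(kvm, &msi);
}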