path: root/include/linux/kvm_host.h
Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r--	include/linux/kvm_host.h	71
1 files changed, 44 insertions, 27 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bd5a616d9373..a3fd0f91d943 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -38,6 +38,7 @@
 #define KVM_REQ_MMU_SYNC           7
 #define KVM_REQ_KVMCLOCK_UPDATE    8
 #define KVM_REQ_KICK               9
+#define KVM_REQ_DEACTIVATE_FPU    10
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID	0
 
@@ -57,20 +58,20 @@ struct kvm_io_bus {
 	struct kvm_io_device *devs[NR_IOBUS_DEVS];
 };
 
-void kvm_io_bus_init(struct kvm_io_bus *bus);
-void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
-		     const void *val);
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
+enum kvm_bus {
+	KVM_MMIO_BUS,
+	KVM_PIO_BUS,
+	KVM_NR_BUSES
+};
+
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+		     int len, const void *val);
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
 		    void *val);
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
-			      struct kvm_io_device *dev);
-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 			    struct kvm_io_device *dev);
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
-				 struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
-			       struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			      struct kvm_io_device *dev);
 
 struct kvm_vcpu {
 	struct kvm *kvm;
@@ -83,6 +84,8 @@ struct kvm_vcpu {
 	struct kvm_run *run;
 	unsigned long requests;
 	unsigned long guest_debug;
+	int srcu_idx;
+
 	int fpu_active;
 	int guest_fpu_loaded;
 	wait_queue_head_t wq;
@@ -150,14 +153,19 @@ struct kvm_irq_routing_table {};
 
 #endif
 
-struct kvm {
-	spinlock_t mmu_lock;
-	spinlock_t requests_lock;
-	struct rw_semaphore slots_lock;
-	struct mm_struct *mm; /* userspace tied to this vm */
+struct kvm_memslots {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
+};
+
+struct kvm {
+	spinlock_t mmu_lock;
+	raw_spinlock_t requests_lock;
+	struct mutex slots_lock;
+	struct mm_struct *mm; /* userspace tied to this vm */
+	struct kvm_memslots *memslots;
+	struct srcu_struct srcu;
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
 	u32 bsp_vcpu_id;
 	struct kvm_vcpu *bsp_vcpu;
@@ -166,8 +174,7 @@ struct kvm {
 	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct mutex lock;
-	struct kvm_io_bus mmio_bus;
-	struct kvm_io_bus pio_bus;
+	struct kvm_io_bus *buses[KVM_NR_BUSES];
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 	struct {
 		spinlock_t lock;
@@ -249,13 +256,20 @@ int kvm_set_memory_region(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
 			    int user_alloc);
-int kvm_arch_set_memory_region(struct kvm *kvm,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				struct kvm_memory_slot old,
+				struct kvm_userspace_memory_region *mem,
+				int user_alloc);
+void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
+gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
@@ -264,6 +278,9 @@ void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
+			 struct kvm_memory_slot *slot, gfn_t gfn);
+int memslot_id(struct kvm *kvm, gfn_t gfn);
 void kvm_release_pfn_dirty(pfn_t);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
@@ -283,6 +300,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
@@ -383,6 +401,7 @@ struct kvm_assigned_dev_kernel {
 	struct work_struct interrupt_work;
 	struct list_head list;
 	int assigned_dev_id;
+	int host_segnr;
 	int host_busnr;
 	int host_devfn;
 	unsigned int entries_nr;
@@ -429,8 +448,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 #define KVM_IOMMU_CACHE_COHERENCY	0x1
 
 #ifdef CONFIG_IOMMU_API
-int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
-			unsigned long npages);
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
@@ -480,11 +498,6 @@ static inline void kvm_guest_exit(void)
 	current->flags &= ~PF_VCPU;
 }
 
-static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
-	return slot - kvm->memslots;
-}
-
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
 {
 	return (gpa_t)gfn << PAGE_SHIFT;
@@ -532,6 +545,10 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
 }
 #endif
 
+#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION
+#define unalias_gfn_instantiation unalias_gfn
+#endif
+
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 
 #define KVM_MAX_IRQ_ROUTES 1024
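
For readers of this change, a minimal sketch of how the reworked interfaces are meant to be used: kvm->memslots and the per-VM kvm->buses[] array are now published under kvm->srcu, so accesses are bracketed by srcu_read_lock()/srcu_read_unlock(), and I/O is routed by the new enum kvm_bus index instead of a per-purpose kvm_io_bus field. The two helper functions below are hypothetical illustrations only and are not part of this patch.

#include <linux/kvm_host.h>
#include <linux/srcu.h>

/* Hypothetical caller: query a gfn while inside an SRCU read-side section. */
static int example_gfn_is_visible(struct kvm *kvm, gfn_t gfn)
{
	int idx, visible;

	idx = srcu_read_lock(&kvm->srcu);	/* protects kvm->memslots */
	visible = kvm_is_visible_gfn(kvm, gfn);
	srcu_read_unlock(&kvm->srcu, idx);

	return visible;
}

/* Hypothetical caller: write to an MMIO device via the indexed bus array. */
static int example_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr,
			      int len, const void *val)
{
	int r;

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, val);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	return r;
}

In-tree users would normally enter the SRCU read side once per vcpu run iteration (hence the new vcpu->srcu_idx field) rather than around each individual access; the per-call locking above is only to keep the sketch self-contained.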