Diffstat (limited to 'include/linux/kvm_host.h')
 include/linux/kvm_host.h | 115
 1 file changed, 71 insertions(+), 44 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3060bdc35ffe..b7bbb5ddd7ae 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -15,7 +15,6 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/preempt.h>
-#include <linux/marker.h>
 #include <linux/msi.h>
 #include <asm/signal.h>
 
@@ -42,6 +41,7 @@
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID	0
 
+struct kvm;
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
@@ -59,10 +59,18 @@ struct kvm_io_bus {
 
 void kvm_io_bus_init(struct kvm_io_bus *bus);
 void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
-					  gpa_t addr, int len, int is_write);
-void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
-			     struct kvm_io_device *dev);
+int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
+		     const void *val);
+int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
+		    void *val);
+int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+			      struct kvm_io_device *dev);
+int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+			    struct kvm_io_device *dev);
+void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
+				 struct kvm_io_device *dev);
+void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+			       struct kvm_io_device *dev);
 
 struct kvm_vcpu {
 	struct kvm *kvm;
@@ -103,7 +111,7 @@ struct kvm_memory_slot {
 	struct {
 		unsigned long rmap_pde;
 		int write_count;
-	} *lpage_info;
+	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
 	unsigned long userspace_addr;
 	int user_alloc;
 };
@@ -124,7 +132,6 @@ struct kvm_kernel_irq_routing_entry {
 };
 
 struct kvm {
-	struct mutex lock; /* protects the vcpus array and APIC accesses */
 	spinlock_t mmu_lock;
 	spinlock_t requests_lock;
 	struct rw_semaphore slots_lock;
@@ -132,10 +139,23 @@ struct kvm {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+	u32 bsp_vcpu_id;
+	struct kvm_vcpu *bsp_vcpu;
+#endif
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	atomic_t online_vcpus;
 	struct list_head vm_list;
+	struct mutex lock;
 	struct kvm_io_bus mmio_bus;
 	struct kvm_io_bus pio_bus;
+#ifdef CONFIG_HAVE_KVM_EVENTFD
+	struct {
+		spinlock_t lock;
+		struct list_head items;
+	} irqfds;
+	struct list_head ioeventfds;
+#endif
 	struct kvm_vm_stat stat;
 	struct kvm_arch arch;
 	atomic_t users_count;
@@ -144,6 +164,7 @@ struct kvm {
 	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
 #endif
 
+	struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
 	struct hlist_head mask_notifier_list;
@@ -167,6 +188,17 @@ struct kvm {
 #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
 #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
 
+static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+{
+	smp_rmb();
+	return kvm->vcpus[i];
+}
+
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+	for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
+	     idx < atomic_read(&kvm->online_vcpus) && vcpup; \
+	     vcpup = kvm_get_vcpu(kvm, ++idx))
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
@@ -201,6 +233,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
+void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
@@ -243,8 +276,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg);
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg);
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
 
 int kvm_dev_ioctl_check_extension(long ext);
 
@@ -300,7 +331,6 @@ int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
-int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 
 void kvm_free_physmem(struct kvm *kvm);
 
@@ -309,8 +339,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
 
-int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
-int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
@@ -366,7 +394,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
-void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+				     struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
@@ -459,37 +488,6 @@ struct kvm_stats_debugfs_item {
 extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
 
-#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 5, d1, d2, d3, d4, d5)
-#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 4, d1, d2, d3, d4, 0)
-#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 3, d1, d2, d3, 0, 0)
-#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 2, d1, d2, 0, 0, 0)
-#define KVMTRACE_1D(evt, vcpu, d1, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 1, d1, 0, 0, 0, 0)
-#define KVMTRACE_0D(evt, vcpu, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 0, 0, 0, 0, 0, 0)
-
-#ifdef CONFIG_KVM_TRACE
-int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
-void kvm_trace_cleanup(void);
-#else
-static inline
-int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
-{
-	return -EINVAL;
-}
-#define kvm_trace_cleanup() ((void)0)
-#endif
-
 #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
 static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
 {
@@ -525,4 +523,33 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 
 #endif
 
+#ifdef CONFIG_HAVE_KVM_EVENTFD
+
+void kvm_eventfd_init(struct kvm *kvm);
+int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
+void kvm_irqfd_release(struct kvm *kvm);
+int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+
+#else
+
+static inline void kvm_eventfd_init(struct kvm *kvm) {}
+static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+{
+	return -EINVAL;
+}
+
+static inline void kvm_irqfd_release(struct kvm *kvm) {}
+static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+	return -ENOSYS;
+}
+
+#endif /* CONFIG_HAVE_KVM_EVENTFD */
+
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
+}
+#endif
 #endif
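
Note: a minimal usage sketch (not part of the patch) of the kvm_get_vcpu()/kvm_for_each_vcpu() helpers added above, showing how a caller would walk the vcpu array instead of indexing kvm->vcpus[] directly. The function name example_kick_all_vcpus() is hypothetical; kvm_vcpu_kick() is the existing helper declared in this header.

/* Hypothetical caller: kick every vcpu that has come online. */
static void example_kick_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	/*
	 * kvm_for_each_vcpu() bounds the walk by
	 * atomic_read(&kvm->online_vcpus) and stops at the first NULL
	 * slot; the smp_rmb() in kvm_get_vcpu() orders the read of the
	 * vcpu pointer against the online_vcpus update made at vcpu
	 * creation (assumed to be paired with a write barrier there).
	 */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}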