author		David Woodhouse <David.Woodhouse@intel.com>	2009-09-20 08:55:36 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-09-20 08:55:36 -0400
commit		6469f540ea37d53db089c8fea9c0c77a3d9353d4 (patch)
tree		1dc9dc077150d57f4424cae49e711b5dd6e903a1 /include/linux/kvm_host.h
parent		304e6d5fe294b80e6d3107f99ec241816390ebcc (diff)
parent		78f28b7c555359c67c2a0d23f7436e915329421e (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/mtd/mtdcore.c
Merged in order that I can apply the Nomadik nand/onenand support patches.
Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r--	include/linux/kvm_host.h	114
1 file changed, 71 insertions(+), 43 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3060bdc35ffe..4af56036a6bf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -42,6 +42,7 @@
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID	0
 
+struct kvm;
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
@@ -59,10 +60,18 @@ struct kvm_io_bus {
 
 void kvm_io_bus_init(struct kvm_io_bus *bus);
 void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
-					  gpa_t addr, int len, int is_write);
-void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
-			     struct kvm_io_device *dev);
+int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
+		     const void *val);
+int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
+		    void *val);
+int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+			      struct kvm_io_device *dev);
+int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+			    struct kvm_io_device *dev);
+void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
+				 struct kvm_io_device *dev);
+void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+			       struct kvm_io_device *dev);
 
 struct kvm_vcpu {
 	struct kvm *kvm;
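The old find-then-dispatch helper is gone: callers now hand the bus a guest physical address and a buffer and let the bus walk its registered devices itself. A minimal caller sketch, assuming only the new declarations above (the handle_mmio_write() helper is illustrative, not part of this header):

	/* Hypothetical caller: forward an MMIO store to whichever device claims it. */
	static int handle_mmio_write(struct kvm *kvm, gpa_t addr, int len,
				     const void *data)
	{
		/* Expected to return 0 once a device on the bus accepts the access. */
		return kvm_io_bus_write(&kvm->mmio_bus, addr, len, data);
	}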
@@ -103,7 +112,7 @@ struct kvm_memory_slot {
 	struct {
 		unsigned long rmap_pde;
 		int write_count;
-	} *lpage_info;
+	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
 	unsigned long userspace_addr;
 	int user_alloc;
 };
@@ -124,7 +133,6 @@ struct kvm_kernel_irq_routing_entry {
 };
 
 struct kvm {
-	struct mutex lock; /* protects the vcpus array and APIC accesses */
 	spinlock_t mmu_lock;
 	spinlock_t requests_lock;
 	struct rw_semaphore slots_lock;
@@ -132,10 +140,23 @@ struct kvm {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+	u32 bsp_vcpu_id;
+	struct kvm_vcpu *bsp_vcpu;
+#endif
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	atomic_t online_vcpus;
 	struct list_head vm_list;
+	struct mutex lock;
 	struct kvm_io_bus mmio_bus;
 	struct kvm_io_bus pio_bus;
+#ifdef CONFIG_HAVE_KVM_EVENTFD
+	struct {
+		spinlock_t        lock;
+		struct list_head  items;
+	} irqfds;
+	struct list_head ioeventfds;
+#endif
 	struct kvm_vm_stat stat;
 	struct kvm_arch arch;
 	atomic_t users_count;
@@ -144,6 +165,7 @@ struct kvm {
 	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
 #endif
 
+	struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
 	struct hlist_head mask_notifier_list;
@@ -167,6 +189,17 @@ struct kvm {
 #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
 #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
 
+static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+{
+	smp_rmb();
+	return kvm->vcpus[i];
+}
+
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+	for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
+	     idx < atomic_read(&kvm->online_vcpus) && vcpup; \
+	     vcpup = kvm_get_vcpu(kvm, ++idx))
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
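The smp_rmb() in kvm_get_vcpu() is presumably there to pair with the producer side, which publishes a new vcpu before raising online_vcpus, so the iterator never sees a half-initialised entry. A minimal usage sketch of the new iterator, assuming only declarations from this header (the kick_all_vcpus() helper is illustrative):

	/* Hypothetical helper: kick every online vcpu out of guest mode. */
	static void kick_all_vcpus(struct kvm *kvm)
	{
		struct kvm_vcpu *vcpu;
		int i;

		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_vcpu_kick(vcpu);
	}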
@@ -201,6 +234,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
+void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
@@ -243,8 +277,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg);
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
 
 int kvm_dev_ioctl_check_extension(long ext);
 
@@ -300,7 +332,6 @@ int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
-int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 
 void kvm_free_physmem(struct kvm *kvm);
 
@@ -309,8 +340,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
 
-int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
-int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
@@ -366,7 +395,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
-void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+				     struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
@@ -459,37 +489,6 @@ struct kvm_stats_debugfs_item {
 extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
 
-#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 5, d1, d2, d3, d4, d5)
-#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 4, d1, d2, d3, d4, 0)
-#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 3, d1, d2, d3, 0, 0)
-#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 2, d1, d2, 0, 0, 0)
-#define KVMTRACE_1D(evt, vcpu, d1, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 1, d1, 0, 0, 0, 0)
-#define KVMTRACE_0D(evt, vcpu, name) \
-	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
-						vcpu, 0, 0, 0, 0, 0, 0)
-
-#ifdef CONFIG_KVM_TRACE
-int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
-void kvm_trace_cleanup(void);
-#else
-static inline
-int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
-{
-	return -EINVAL;
-}
-#define kvm_trace_cleanup() ((void)0)
-#endif
-
 #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
 static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
 {
@@ -525,4 +524,33 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 
 #endif
 
+#ifdef CONFIG_HAVE_KVM_EVENTFD
+
+void kvm_eventfd_init(struct kvm *kvm);
+int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
+void kvm_irqfd_release(struct kvm *kvm);
+int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+
+#else
+
+static inline void kvm_eventfd_init(struct kvm *kvm) {}
+static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+{
+	return -EINVAL;
+}
+
+static inline void kvm_irqfd_release(struct kvm *kvm) {}
+static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+	return -ENOSYS;
+}
+
+#endif /* CONFIG_HAVE_KVM_EVENTFD */
+
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
+}
+#endif
 #endif
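With bsp_vcpu_id now kept in struct kvm, architecture code can ask directly whether a vcpu is the boot-strap processor. A minimal sketch combining kvm_vcpu_is_bsp() with the new iterator, assuming CONFIG_KVM_APIC_ARCHITECTURE is set (the find_bsp() helper is illustrative, not part of this header):

	/* Hypothetical helper: locate the boot-strap processor among the online vcpus. */
	static struct kvm_vcpu *find_bsp(struct kvm *kvm)
	{
		struct kvm_vcpu *vcpu;
		int i;

		kvm_for_each_vcpu(i, vcpu, kvm)
			if (kvm_vcpu_is_bsp(vcpu))
				return vcpu;
		return NULL;
	}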