| author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
|---|---|---|
| committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
| commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
| tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /include/linux/kvm_host.h | |
| parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
| parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) | |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r-- | include/linux/kvm_host.h | 185
1 file changed, 150 insertions(+), 35 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c13cc48697aa..31ebb59cbd2f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -16,6 +16,8 @@
 #include <linux/mm.h>
 #include <linux/preempt.h>
 #include <linux/msi.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -25,6 +27,10 @@
 
 #include <asm/kvm_host.h>
 
+#ifndef KVM_MMIO_SIZE
+#define KVM_MMIO_SIZE 8
+#endif
+
 /*
  * vcpu->requests bit members
  */
@@ -36,9 +42,11 @@
 #define KVM_REQ_PENDING_TIMER 5
 #define KVM_REQ_UNHALT 6
 #define KVM_REQ_MMU_SYNC 7
-#define KVM_REQ_KVMCLOCK_UPDATE 8
+#define KVM_REQ_CLOCK_UPDATE 8
 #define KVM_REQ_KICK 9
 #define KVM_REQ_DEACTIVATE_FPU 10
+#define KVM_REQ_EVENT 11
+#define KVM_REQ_APF_HALT 12
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
 
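The renamed KVM_REQ_CLOCK_UPDATE bit and the new KVM_REQ_EVENT/KVM_REQ_APF_HALT bits live in vcpu->requests. A minimal sketch of the producer side, assuming the kvm_make_request() helper declared later in this header and the kvm_vcpu_kick() helper from KVM core; the function name is invented:

```c
/* Illustrative only: raise a request bit, then push the vcpu out of
 * guest mode so the request is noticed promptly.  kvm_make_request()
 * is the set_bit() wrapper from this header; kvm_vcpu_kick() is
 * assumed from KVM core. */
static void example_post_event(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	kvm_vcpu_kick(vcpu);
}
```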
@@ -73,23 +81,52 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 			      struct kvm_io_device *dev);
 
+#ifdef CONFIG_KVM_ASYNC_PF
+struct kvm_async_pf {
+	struct work_struct work;
+	struct list_head link;
+	struct list_head queue;
+	struct kvm_vcpu *vcpu;
+	struct mm_struct *mm;
+	gva_t gva;
+	unsigned long addr;
+	struct kvm_arch_async_pf arch;
+	struct page *page;
+	bool done;
+};
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+		       struct kvm_arch_async_pf *arch);
+int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
+#endif
+
+enum {
+	OUTSIDE_GUEST_MODE,
+	IN_GUEST_MODE,
+	EXITING_GUEST_MODE
+};
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	struct preempt_notifier preempt_notifier;
 #endif
+	int cpu;
 	int vcpu_id;
-	struct mutex mutex;
-	int cpu;
-	atomic_t guest_mode;
-	struct kvm_run *run;
+	int srcu_idx;
+	int mode;
 	unsigned long requests;
 	unsigned long guest_debug;
-	int srcu_idx;
+
+	struct mutex mutex;
+	struct kvm_run *run;
 
 	int fpu_active;
 	int guest_fpu_loaded, guest_xcr0_loaded;
 	wait_queue_head_t wq;
+	struct pid *pid;
 	int sigset_active;
 	sigset_t sigset;
 	struct kvm_vcpu_stat stat;
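The new CONFIG_KVM_ASYNC_PF block lets a vcpu hand a page that faulted in the host to a workqueue instead of blocking the guest; the run loop later reaps finished items via kvm_check_async_pf_completion(). A hedged sketch of the producer side; the function name and the nonzero-means-queued return convention are assumptions:

```c
/* Illustrative only: try to turn a blocking host fault into an async
 * one.  kvm_setup_async_pf() queues a kvm_async_pf work item that
 * faults the page in from a workqueue; it is assumed to return nonzero
 * on success.  On failure the caller faults synchronously. */
static bool example_try_async_fault(struct kvm_vcpu *vcpu, gva_t gva,
				    gfn_t gfn, struct kvm_arch_async_pf *arch)
{
	return kvm_setup_async_pf(vcpu, gva, gfn, arch) != 0;
}
```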
@@ -99,29 +136,47 @@ struct kvm_vcpu {
 	int mmio_read_completed;
 	int mmio_is_write;
 	int mmio_size;
-	unsigned char mmio_data[8];
+	int mmio_index;
+	unsigned char mmio_data[KVM_MMIO_SIZE];
 	gpa_t mmio_phys_addr;
 #endif
 
+#ifdef CONFIG_KVM_ASYNC_PF
+	struct {
+		u32 queued;
+		struct list_head queue;
+		struct list_head done;
+		spinlock_t lock;
+	} async_pf;
+#endif
+
 	struct kvm_vcpu_arch arch;
 };
 
+static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
+{
+	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
+}
+
 /*
  * Some of the bitops functions do not support too long bitmaps.
  * This number must be determined not to exceed such limits.
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
+struct kvm_lpage_info {
+	unsigned long rmap_pde;
+	int write_count;
+};
+
 struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long flags;
 	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
-	struct {
-		unsigned long rmap_pde;
-		int write_count;
-	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
+	unsigned long *dirty_bitmap_head;
+	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 	unsigned long userspace_addr;
 	int user_alloc;
 	int id;
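vcpu->mode plus kvm_vcpu_exiting_guest_mode() (defined in the hunk above) replace the old atomic_t guest_mode: the cmpxchg returns the mode it observed, so a remote kicker can tell whether an IPI is actually needed. A sketch; the function name and the smp_send_reschedule() choice are illustrative:

```c
/* Illustrative only: IPI the vcpu's cpu only if it really was in guest
 * mode; a vcpu that is already exiting needs no second interrupt. */
static void example_kick_out_of_guest(struct kvm_vcpu *vcpu)
{
	int cpu = vcpu->cpu;	/* the new kvm_vcpu::cpu field */

	if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
		smp_send_reschedule(cpu);	/* forces a VM exit */
}
```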
@@ -168,13 +223,13 @@ struct kvm_irq_routing_table {};
 
 struct kvm_memslots {
 	int nmemslots;
+	u64 generation;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
 };
 
 struct kvm {
 	spinlock_t mmu_lock;
-	raw_spinlock_t requests_lock;
 	struct mutex slots_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
 	struct kvm_memslots *memslots;
@@ -185,6 +240,7 @@ struct kvm {
 #endif
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	atomic_t online_vcpus;
+	int last_boosted_vcpu;
 	struct list_head vm_list;
 	struct mutex lock;
 	struct kvm_io_bus *buses[KVM_NR_BUSES];
@@ -205,7 +261,11 @@ struct kvm {
 
 	struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
-	struct kvm_irq_routing_table *irq_routing;
+	/*
+	 * Update side is protected by irq_lock and,
+	 * if configured, irqfds.lock.
+	 */
+	struct kvm_irq_routing_table __rcu *irq_routing;
 	struct hlist_head mask_notifier_list;
 	struct hlist_head irq_ack_notifier_list;
 #endif
@@ -215,6 +275,7 @@ struct kvm {
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
+	long tlbs_dirty;
 };
 
 /* The guest did something we don't support. */
@@ -235,9 +296,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 }
 
 #define kvm_for_each_vcpu(idx, vcpup, kvm) \
-	for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
-	     idx < atomic_read(&kvm->online_vcpus) && vcpup; \
-	     vcpup = kvm_get_vcpu(kvm, ++idx))
+	for (idx = 0; \
+	     idx < atomic_read(&kvm->online_vcpus) && \
+	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
+	     idx++)
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
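The reworked kvm_for_each_vcpu() now checks the idx bound before evaluating kvm_get_vcpu(), so the loop body only ever sees non-NULL vcpus. Typical usage, with the surrounding function invented for illustration:

```c
/* Illustrative only: ask every online vcpu to refresh its clock. */
static void example_request_clock_update_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
}
```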
@@ -289,6 +351,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+			    int nr_pages);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
@@ -296,10 +361,15 @@ void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
+pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
+pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+		      bool *writable);
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn);
-int memslot_id(struct kvm *kvm, gfn_t gfn);
 void kvm_release_pfn_dirty(pfn_t);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
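gfn_to_pfn_prot() is the variant of interest for dirty tracking: the caller asks for a write mapping and learns through *writable whether one was granted. A sketch, assuming the is_error_pfn() helper this header provides; the function name is illustrative and error handling is minimal:

```c
/* Illustrative only: probe a gfn for writability, release accordingly. */
static void example_probe_writable(struct kvm *kvm, gfn_t gfn)
{
	bool writable;
	pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, true, &writable);

	if (is_error_pfn(pfn))
		return;
	if (writable)
		kvm_release_pfn_dirty(pfn);	/* we could have written */
	else
		kvm_release_pfn_clean(pfn);
}
```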
@@ -315,18 +385,25 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 			 int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 		    unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
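The new cached-write pair avoids a gfn_to_hva() translation on every write to a fixed guest address: initialize a struct gfn_to_hva_cache (defined in linux/kvm_types.h) once, then use kvm_write_guest_cached() on the hot path. A sketch; the function name and u64 payload are illustrative, and in real code the init would happen once at setup rather than per call:

```c
/* Illustrative only: publish a 64-bit value to a fixed gpa. */
static int example_publish_u64(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			       gpa_t gpa, u64 *val)
{
	int r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa); /* setup-time step */

	if (r)
		return r;
	return kvm_write_guest_cached(kvm, ghc, val, sizeof(*val));
}
```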
@@ -392,7 +469,19 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 
 void kvm_free_physmem(struct kvm *kvm);
 
-struct kvm *kvm_arch_create_vm(void);
+#ifndef __KVM_HAVE_ARCH_VM_ALLOC
+static inline struct kvm *kvm_arch_alloc_vm(void)
+{
+	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+}
+
+static inline void kvm_arch_free_vm(struct kvm *kvm)
+{
+	kfree(kvm);
+}
+#endif
+
+int kvm_arch_init_vm(struct kvm *kvm);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
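kvm_arch_create_vm() is replaced by an allocate/init/free split; an architecture that wants struct kvm embedded in a larger arch-private structure defines __KVM_HAVE_ARCH_VM_ALLOC in its asm/kvm_host.h and supplies its own pair. A hedged sketch of such an override; struct example_arch_vm is invented:

```c
/* Illustrative only: an arch override of the default alloc/free pair
 * when struct kvm is embedded in arch-private state. */
#define __KVM_HAVE_ARCH_VM_ALLOC

struct example_arch_vm {
	struct kvm kvm;		/* embedded; &v->kvm goes to generic code */
	unsigned long arch_private;
};

static inline struct kvm *kvm_arch_alloc_vm(void)
{
	struct example_arch_vm *v = kzalloc(sizeof(*v), GFP_KERNEL);

	return v ? &v->kvm : NULL;
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(container_of(kvm, struct example_arch_vm, kvm));
}
```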
@@ -408,16 +497,8 @@ struct kvm_irq_ack_notifier {
 	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
 };
 
-#define KVM_ASSIGNED_MSIX_PENDING 0x1
-struct kvm_guest_msix_entry {
-	u32 vector;
-	u16 entry;
-	u16 flags;
-};
-
 struct kvm_assigned_dev_kernel {
 	struct kvm_irq_ack_notifier ack_notifier;
-	struct work_struct interrupt_work;
 	struct list_head list;
 	int assigned_dev_id;
 	int host_segnr;
@@ -428,13 +509,15 @@ struct kvm_assigned_dev_kernel {
 	bool host_irq_disabled;
 	struct msix_entry *host_msix_entries;
 	int guest_irq;
-	struct kvm_guest_msix_entry *guest_msix_entries;
+	struct msix_entry *guest_msix_entries;
 	unsigned long irq_requested_type;
 	int irq_source_id;
 	int flags;
 	struct pci_dev *dev;
 	struct kvm *kvm;
-	spinlock_t assigned_dev_lock;
+	spinlock_t intx_lock;
+	char irq_name[32];
+	struct pci_saved_state *pci_saved_state;
 };
 
 struct kvm_irq_mask_notifier {
@@ -456,6 +539,8 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 				   unsigned long *deliver_bitmask);
 #endif
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
+		int irq_source_id, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
@@ -477,8 +562,7 @@ int kvm_deassign_device(struct kvm *kvm,
 			struct kvm_assigned_dev_kernel *assigned_dev);
 #else /* CONFIG_IOMMU_API */
 static inline int kvm_iommu_map_pages(struct kvm *kvm,
-				      gfn_t base_gfn,
-				      unsigned long npages)
+				      struct kvm_memory_slot *slot)
 {
 	return 0;
 }
@@ -508,8 +592,17 @@ static inline int kvm_deassign_device(struct kvm *kvm,
 
 static inline void kvm_guest_enter(void)
 {
+	BUG_ON(preemptible());
 	account_system_vtime(current);
 	current->flags |= PF_VCPU;
+	/* KVM does not hold any references to rcu protected data when it
+	 * switches CPU into a guest mode. In fact switching to a guest mode
+	 * is very similar to exiting to userspace from rcu point of view. In
+	 * addition CPU may stay in a guest mode for quite a long time (up to
+	 * one time slice). Let's treat guest mode as quiescent state, just like
+	 * we do with user-mode execution.
+	 */
+	rcu_virt_note_context_switch(smp_processor_id());
 }
 
 static inline void kvm_guest_exit(void)
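The new BUG_ON(preemptible()) documents that kvm_guest_enter() must run with preemption off, and rcu_virt_note_context_switch() tells RCU to treat the guest-mode interval as a quiescent state. A sketch of the entry path this implies; everything except kvm_guest_enter()/kvm_guest_exit() and the mode values is illustrative:

```c
/* Illustrative only: the rough shape of an arch vcpu entry path. */
static void example_enter_guest(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	vcpu->mode = IN_GUEST_MODE;
	kvm_guest_enter();	/* would BUG if still preemptible */

	/* hardware VM entry / guest execution happens here */

	vcpu->mode = OUTSIDE_GUEST_MODE;
	kvm_guest_exit();
	preempt_enable();
}
```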
515 | static inline void kvm_guest_exit(void) | 608 | static inline void kvm_guest_exit(void) |
@@ -518,11 +611,27 @@ static inline void kvm_guest_exit(void) | |||
518 | current->flags &= ~PF_VCPU; | 611 | current->flags &= ~PF_VCPU; |
519 | } | 612 | } |
520 | 613 | ||
614 | static inline int memslot_id(struct kvm *kvm, gfn_t gfn) | ||
615 | { | ||
616 | return gfn_to_memslot(kvm, gfn)->id; | ||
617 | } | ||
618 | |||
619 | static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, | ||
620 | gfn_t gfn) | ||
621 | { | ||
622 | return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; | ||
623 | } | ||
624 | |||
521 | static inline gpa_t gfn_to_gpa(gfn_t gfn) | 625 | static inline gpa_t gfn_to_gpa(gfn_t gfn) |
522 | { | 626 | { |
523 | return (gpa_t)gfn << PAGE_SHIFT; | 627 | return (gpa_t)gfn << PAGE_SHIFT; |
524 | } | 628 | } |
525 | 629 | ||
630 | static inline gfn_t gpa_to_gfn(gpa_t gpa) | ||
631 | { | ||
632 | return (gfn_t)(gpa >> PAGE_SHIFT); | ||
633 | } | ||
634 | |||
526 | static inline hpa_t pfn_to_hpa(pfn_t pfn) | 635 | static inline hpa_t pfn_to_hpa(pfn_t pfn) |
527 | { | 636 | { |
528 | return (hpa_t)pfn << PAGE_SHIFT; | 637 | return (hpa_t)pfn << PAGE_SHIFT; |
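gpa_to_gfn() completes the conversion set: with PAGE_SHIFT == 12, gpa 0x12345678 maps to gfn 0x12345, and a round trip through gfn_to_gpa() yields the containing page base. A trivial illustration; the helper name is invented:

```c
/* Illustrative only: 0x12345678 -> gfn 0x12345 -> gpa 0x12345000. */
static inline gpa_t example_gpa_page_base(gpa_t gpa)
{
	return gfn_to_gpa(gpa_to_gfn(gpa));
}
```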
@@ -587,17 +696,28 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 void kvm_eventfd_init(struct kvm *kvm);
 int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
 void kvm_irqfd_release(struct kvm *kvm);
+void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
 #else
 
 static inline void kvm_eventfd_init(struct kvm *kvm) {}
+
 static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
 {
 	return -EINVAL;
 }
 
 static inline void kvm_irqfd_release(struct kvm *kvm) {}
+
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+static inline void kvm_irq_routing_update(struct kvm *kvm,
+					  struct kvm_irq_routing_table *irq_rt)
+{
+	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+}
+#endif
+
 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
 	return -ENOSYS;
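With irq_routing now annotated __rcu and published via rcu_assign_pointer() (see kvm_irq_routing_update() above), readers are expected to dereference it inside an RCU read-side critical section rather than under irq_lock. A hedged reader-side sketch; the lookup body and function name are illustrative:

```c
/* Illustrative only: read the routing table without taking irq_lock. */
static void example_deliver_gsi(struct kvm *kvm, u32 gsi)
{
	struct kvm_irq_routing_table *irq_rt;

	rcu_read_lock();
	irq_rt = rcu_dereference(kvm->irq_routing);
	/* ... resolve gsi against irq_rt and deliver ... */
	rcu_read_unlock();
}
```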
@@ -632,11 +752,6 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
 	set_bit(req, &vcpu->requests);
 }
 
-static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
-{
-	return test_and_set_bit(req, &vcpu->requests);
-}
-
 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 {
 	if (test_bit(req, &vcpu->requests)) {
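The unused kvm_make_check_request() is dropped, while kvm_check_request() (whose test-and-clear body continues past this truncated hunk) remains the consumer-side primitive. A sketch of the usual drain pattern in a vcpu run loop; the function name is illustrative:

```c
/* Illustrative only: each loop iteration drains the bits it handles. */
static void example_drain_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
		/* refresh the guest clock here */
	}
	if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) {
		/* sync shadow page tables here */
	}
}
```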