Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r--  include/linux/kvm_host.h  85
1 file changed, 65 insertions, 20 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2c497ab0d03d..cad77fe09d77 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -22,6 +22,7 @@
 #include <linux/rcupdate.h>
 #include <linux/ratelimit.h>
 #include <linux/err.h>
+#include <linux/irqflags.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -122,6 +123,8 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_WATCHDOG          18
 #define KVM_REQ_MASTERCLOCK_UPDATE 19
 #define KVM_REQ_MCLOCK_INPROGRESS 20
+#define KVM_REQ_EPR_EXIT          21
+#define KVM_REQ_EOIBITMAP         22
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
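
For context, request bits like the two added here are raised with kvm_make_request() and consumed in the arch vcpu run loop with kvm_check_request(), both already defined in this header. A minimal sketch of the consumer side, with the handler names invented for illustration (they are not part of this patch):

/* Sketch only: how new request bits are typically drained before guest entry. */
static void vcpu_handle_requests_sketch(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
		arch_update_eoi_bitmap(vcpu);		/* hypothetical arch hook */
	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu))
		vcpu->run->exit_reason = KVM_EXIT_EPR;	/* hypothetical: bounce to userspace */
}
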
@@ -266,12 +269,11 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
 struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
-	unsigned long flags;
 	unsigned long *dirty_bitmap;
 	struct kvm_arch_memory_slot arch;
 	unsigned long userspace_addr;
-	int user_alloc;
-	int id;
+	u32 flags;
+	short id;
 };
 
 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
@@ -313,8 +315,12 @@ struct kvm_irq_routing_table {};
 
 #endif
 
+#ifndef KVM_PRIVATE_MEM_SLOTS
+#define KVM_PRIVATE_MEM_SLOTS 0
+#endif
+
 #ifndef KVM_MEM_SLOTS_NUM
-#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
 #endif
 
 /*
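
With the hunk above, an architecture only needs to define KVM_USER_MEM_SLOTS (the slots userspace may create) and, if it reserves internal slots, KVM_PRIVATE_MEM_SLOTS; the latter now defaults to 0. A hedged sketch of what an arch header might provide (the values are illustrative, not taken from any particular architecture):

/* Illustrative only: in arch/<arch>/include/asm/kvm_host.h */
#define KVM_USER_MEM_SLOTS	32	/* slots userspace can manage */
#define KVM_PRIVATE_MEM_SLOTS	4	/* slots reserved for arch-internal use */
/* KVM_MEM_SLOTS_NUM above then becomes 32 + 4 = 36. */
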
@@ -326,7 +332,7 @@ struct kvm_memslots {
 	u64 generation;
 	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
 	/* The mapping table from slot id to the index in memslots[]. */
-	int id_to_index[KVM_MEM_SLOTS_NUM];
+	short id_to_index[KVM_MEM_SLOTS_NUM];
 };
 
 struct kvm {
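
Since both the per-slot id and this id_to_index table are now short, the configured slot count has to stay within the range of a short. A defensive check one could imagine adding (illustrative, not necessarily part of this patch):

/* Sketch: fail the build if the slot count no longer fits in a short. */
static inline void kvm_memslot_id_width_check(void)
{
	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
}
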
@@ -424,7 +430,8 @@ void kvm_exit(void);
 
 void kvm_get_kvm(struct kvm *kvm);
 void kvm_put_kvm(struct kvm *kvm);
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+		     u64 last_generation);
 
 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
 {
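
The new last_generation argument lets the caller hand in the generation of the memslots copy it started from, so the updated copy can be published with a strictly newer generation number. A simplified sketch of the intended calling pattern (not the literal kvm_main.c code):

/* Sketch: installing an updated memslots copy with a bumped generation. */
static void install_new_memslots_sketch(struct kvm *kvm,
					struct kvm_memslots *slots,
					struct kvm_memory_slot *new)
{
	u64 last_generation = kvm->memslots->generation;

	update_memslots(slots, new, last_generation);
	rcu_assign_pointer(kvm->memslots, slots);
	/* readers now observe a generation greater than last_generation */
}
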
@@ -447,10 +454,10 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
-			  int user_alloc);
+			  bool user_alloc);
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
-			    int user_alloc);
+			    bool user_alloc);
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont);
 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
@@ -458,11 +465,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_memory_slot old,
 				   struct kvm_userspace_memory_region *mem,
-				   int user_alloc);
+				   bool user_alloc);
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   struct kvm_memory_slot old,
-				   int user_alloc);
+				   bool user_alloc);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
 /* flush all memory translations */
@@ -532,6 +539,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
+void kvm_make_update_eoibitmap_request(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
@@ -549,7 +557,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 				   struct
 				   kvm_userspace_memory_region *mem,
-				   int user_alloc);
+				   bool user_alloc);
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
@@ -685,6 +693,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
 		int irq_source_id, int level);
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
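
kvm_irq_has_notifier() lets callers ask whether any ack notifier is registered for an irqchip pin without actually delivering an ack. A hypothetical use, tying it to the EOI-bitmap request declared earlier in this diff (the wrapper is invented for illustration):

/* Hypothetical sketch: recompute the EOI bitmap only for pins somebody listens to. */
static void maybe_request_eoibitmap_update(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	if (kvm_irq_has_notifier(kvm, irqchip, pin))
		kvm_make_update_eoibitmap_request(kvm);
}
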
@@ -740,15 +749,52 @@ static inline int kvm_deassign_device(struct kvm *kvm,
 }
 #endif /* CONFIG_IOMMU_API */
 
-static inline void kvm_guest_enter(void)
+static inline void __guest_enter(void)
 {
-	BUG_ON(preemptible());
 	/*
 	 * This is running in ioctl context so we can avoid
 	 * the call to vtime_account() with its unnecessary idle check.
 	 */
-	vtime_account_system_irqsafe(current);
+	vtime_account_system(current);
 	current->flags |= PF_VCPU;
+}
+
+static inline void __guest_exit(void)
+{
+	/*
+	 * This is running in ioctl context so we can avoid
+	 * the call to vtime_account() with its unnecessary idle check.
+	 */
+	vtime_account_system(current);
+	current->flags &= ~PF_VCPU;
+}
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern void guest_enter(void);
+extern void guest_exit(void);
+
+#else /* !CONFIG_CONTEXT_TRACKING */
+static inline void guest_enter(void)
+{
+	__guest_enter();
+}
+
+static inline void guest_exit(void)
+{
+	__guest_exit();
+}
+#endif /* !CONFIG_CONTEXT_TRACKING */
+
+static inline void kvm_guest_enter(void)
+{
+	unsigned long flags;
+
+	BUG_ON(preemptible());
+
+	local_irq_save(flags);
+	guest_enter();
+	local_irq_restore(flags);
+
 	/* KVM does not hold any references to rcu protected data when it
 	 * switches CPU into a guest mode. In fact switching to a guest mode
 	 * is very similar to exiting to userspase from rcu point of view. In
@@ -761,12 +807,11 @@ static inline void kvm_guest_enter(void)
 
 static inline void kvm_guest_exit(void)
 {
-	/*
-	 * This is running in ioctl context so we can avoid
-	 * the call to vtime_account() with its unnecessary idle check.
-	 */
-	vtime_account_system_irqsafe(current);
-	current->flags &= ~PF_VCPU;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	guest_exit();
+	local_irq_restore(flags);
 }
 
 /*
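
Taken together, the entry/exit rework keeps the contract for callers unchanged: an arch run loop still disables preemption and interrupts, calls kvm_guest_enter() right before entering the guest and kvm_guest_exit() afterwards, while the local_irq_save()/local_irq_restore() pairs inside the helpers (hence the new <linux/irqflags.h> include) satisfy vtime_account_system()'s requirement of running with interrupts off. A simplified sketch of a caller, not taken from any particular architecture:

/* Sketch only: the usual shape of an arch vcpu run loop around these helpers. */
static int vcpu_run_once_sketch(struct kvm_vcpu *vcpu)
{
	int r;

	preempt_disable();
	local_irq_disable();

	kvm_guest_enter();		/* guest time accounting; context tracking if enabled */
	r = arch_enter_guest(vcpu);	/* hypothetical low-level world switch */
	kvm_guest_exit();

	local_irq_enable();
	preempt_enable();
	return r;
}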