Diffstat (limited to 'include/linux/kvm_host.h')
-rw-r--r--   include/linux/kvm_host.h   53
1 file changed, 40 insertions(+), 13 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d5cddd8dcc5c..2c497ab0d03d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -47,28 +47,40 @@
 
 /*
  * For the normal pfn, the highest 12 bits should be zero,
- * so we can mask these bits to indicate the error.
+ * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
+ * mask bit 63 to indicate the noslot pfn.
  */
-#define KVM_PFN_ERR_MASK	(0xfffULL << 52)
+#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
+#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
+#define KVM_PFN_NOSLOT		(0x1ULL << 63)
 
 #define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
 #define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
-#define KVM_PFN_ERR_BAD		(KVM_PFN_ERR_MASK + 2)
-#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 3)
+#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)
 
+/*
+ * error pfns indicate that the gfn is in slot but failed to
+ * translate it to pfn on host.
+ */
 static inline bool is_error_pfn(pfn_t pfn)
 {
 	return !!(pfn & KVM_PFN_ERR_MASK);
 }
 
-static inline bool is_noslot_pfn(pfn_t pfn)
+/*
+ * error_noslot pfns indicate that the gfn can not be
+ * translated to pfn - it is not in slot or failed to
+ * translate it to pfn.
+ */
+static inline bool is_error_noslot_pfn(pfn_t pfn)
 {
-	return pfn == KVM_PFN_ERR_BAD;
+	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
 }
 
-static inline bool is_invalid_pfn(pfn_t pfn)
+/* noslot pfn indicates that the gfn is not in slot. */
+static inline bool is_noslot_pfn(pfn_t pfn)
 {
-	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
+	return pfn == KVM_PFN_NOSLOT;
 }
 
 #define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
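The new encoding packs two signals into the high bits of a pfn: bits 52-62 (KVM_PFN_ERR_MASK) mark a gfn that has a memslot but failed to translate, while bit 63 alone (KVM_PFN_NOSLOT) marks a gfn with no memslot at all; KVM_PFN_ERR_NOSLOT_MASK spans bits 52-63 so a single test catches both classes. A minimal userspace sketch of the arithmetic, with pfn_t stood in by uint64_t (illustration only, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t pfn_t;            /* stand-in for the kernel type */

    #define KVM_PFN_ERR_MASK        (0x7ffULL << 52)  /* bits 52-62 */
    #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)  /* bits 52-63 */
    #define KVM_PFN_NOSLOT          (0x1ULL << 63)    /* bit 63 only */
    #define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)

    int main(void)
    {
            pfn_t normal = 0x1234;     /* high 12 bits clear: a real pfn */

            assert(!(normal & KVM_PFN_ERR_NOSLOT_MASK));      /* valid */
            assert(KVM_PFN_ERR_HWPOISON & KVM_PFN_ERR_MASK);  /* error */
            assert(!(KVM_PFN_NOSLOT & KVM_PFN_ERR_MASK));     /* not "error" */
            assert(KVM_PFN_NOSLOT & KVM_PFN_ERR_NOSLOT_MASK); /* but noslot */
            return 0;
    }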
@@ -107,6 +119,9 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_IMMEDIATE_EXIT    15
 #define KVM_REQ_PMU               16
 #define KVM_REQ_PMI               17
+#define KVM_REQ_WATCHDOG          18
+#define KVM_REQ_MASTERCLOCK_UPDATE 19
+#define KVM_REQ_MCLOCK_INPROGRESS 20
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
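The KVM_REQ_* values are bit indices into a per-vcpu request bitmap, not flag masks, which is why the new entries count 18, 19, 20 rather than doubling. A hedged userspace stand-in for the set / test-and-clear pattern the kernel applies to vcpu->requests (make_request and check_request are illustrative names, not the kernel helpers):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define KVM_REQ_MASTERCLOCK_UPDATE 19

    static _Atomic unsigned long requests;   /* stand-in for vcpu->requests */

    static void make_request(int req)
    {
            atomic_fetch_or(&requests, 1UL << req);
    }

    static bool check_request(int req)
    {
            unsigned long mask = 1UL << req;

            /* test-and-clear: true only if the bit was set */
            return atomic_fetch_and(&requests, ~mask) & mask;
    }

    int main(void)
    {
            make_request(KVM_REQ_MASTERCLOCK_UPDATE);
            printf("%d\n", check_request(KVM_REQ_MASTERCLOCK_UPDATE)); /* 1 */
            printf("%d\n", check_request(KVM_REQ_MASTERCLOCK_UPDATE)); /* 0 */
            return 0;
    }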
@@ -516,6 +531,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
+void kvm_make_mclock_inprogress_request(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
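kvm_make_mclock_inprogress_request() pairs with the KVM_REQ_MCLOCK_INPROGRESS and KVM_REQ_MASTERCLOCK_UPDATE bits added above. A hedged sketch of the handshake an arch could build on it: park every vcpu, publish the new masterclock, then let each vcpu pick it up. The caller name and update_masterclock_copy() are hypothetical stand-ins; the real update lives in arch code not shown in this diff:

    #include <linux/kvm_host.h>

    void update_masterclock(struct kvm *kvm)     /* illustrative caller */
    {
            struct kvm_vcpu *vcpu;
            int i;

            /* kick all vcpus out of guest mode until the update lands */
            kvm_make_mclock_inprogress_request(kvm);

            update_masterclock_copy(kvm);        /* hypothetical update step */

            kvm_for_each_vcpu(i, vcpu, kvm) {
                    /* ask each vcpu to reload its clock... */
                    kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
                    /* ...and release it from the in-progress gate */
                    clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
            }
    }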
@@ -569,9 +585,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
 
-int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
 int kvm_arch_hardware_enable(void *garbage);
 void kvm_arch_hardware_disable(void *garbage);
 int kvm_arch_hardware_setup(void);
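kvm_arch_vcpu_postcreate() gives architectures a hook that runs after vcpu creation and setup have succeeded, complementing kvm_arch_vcpu_setup(); kvm_arch_vcpu_reset() drops out of the generic API at the same time. An arch with no post-creation work would presumably satisfy the new hook with a trivial stub, something like:

    #include <linux/kvm_host.h>

    /* hedged sketch: a no-op implementation an arch might provide */
    int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
    {
            return 0;       /* nothing to do once the vcpu exists */
    }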
@@ -666,6 +682,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 				   unsigned long *deliver_bitmask);
 #endif
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
 		int irq_source_id, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
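kvm_set_irq_inatomic() mirrors kvm_set_irq() but is intended for contexts that cannot sleep; the assumption here is that it reports failure (e.g. -EWOULDBLOCK) instead of blocking, so callers fall back to process context. A hedged sketch of that calling pattern (irqfd_inject_work is a hypothetical work item, and its INIT_WORK setup is omitted):

    #include <linux/kvm_host.h>
    #include <linux/workqueue.h>

    static struct work_struct irqfd_inject_work;  /* hypothetical deferral */

    static void deliver_irq(struct kvm *kvm, int irq_source_id, u32 irq)
    {
            /* try the fast, non-sleeping path first */
            if (kvm_set_irq_inatomic(kvm, irq_source_id, irq, 1) == -EWOULDBLOCK)
                    schedule_work(&irqfd_inject_work); /* slow path can sleep */
    }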
@@ -838,9 +855,9 @@ extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
+static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
 {
-	if (unlikely(vcpu->kvm->mmu_notifier_count))
+	if (unlikely(kvm->mmu_notifier_count))
 		return 1;
 	/*
 	 * Ensure the read of mmu_notifier_count happens before the read
@@ -853,7 +870,7 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
 	 * can't rely on kvm->mmu_lock to keep things ordered.
 	 */
 	smp_rmb();
-	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
+	if (kvm->mmu_notifier_seq != mmu_seq)
 		return 1;
 	return 0;
 }
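With the vcpu argument gone, mmu_notifier_retry() can be used anywhere a struct kvm is at hand, not just on a vcpu path. A hedged sketch of the caller pattern the helper supports, modeled on how arch MMU code typically uses it: snapshot the sequence, translate without mmu_lock, then recheck under mmu_lock before committing. map_one_gfn() and map_pfn() are hypothetical names for the arch-specific steps:

    #include <linux/kvm_host.h>

    static void map_one_gfn(struct kvm *kvm, gfn_t gfn)
    {
            unsigned long mmu_seq;
            pfn_t pfn;

    again:
            mmu_seq = kvm->mmu_notifier_seq;
            smp_rmb();                      /* order seq read, as above */
            pfn = gfn_to_pfn(kvm, gfn);     /* may sleep; mmu_lock not held */
            if (is_error_noslot_pfn(pfn))
                    return;

            spin_lock(&kvm->mmu_lock);
            if (mmu_notifier_retry(kvm, mmu_seq)) {
                    /* invalidation ran after the snapshot; pfn may be stale */
                    spin_unlock(&kvm->mmu_lock);
                    kvm_release_pfn_clean(pfn);
                    goto again;
            }
            map_pfn(kvm, gfn, pfn);         /* hypothetical mapping step */
            spin_unlock(&kvm->mmu_lock);
    }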
@@ -881,10 +898,20 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 
 void kvm_eventfd_init(struct kvm *kvm);
+int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
 void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
-int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+#else
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
+{
+	return -EINVAL;
+}
+
+static inline void kvm_irqfd_release(struct kvm *kvm) {}
+#endif
 
 #else
 