Diffstat (limited to 'include/linux/kvm_host.h')
 include/linux/kvm_host.h | 39 ++++++++++++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5276fe0916fc..b1fa8f11c95b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -35,6 +35,10 @@
 
 #include <asm/kvm_host.h>
 
+#ifndef KVM_MAX_VCPU_ID
+#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#endif
+
 /*
  * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
  * in kvm, other bits are visible for userspace which are defined in
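
The new fallback decouples the largest allowed vcpu_id from the number of vCPUs: an architecture whose guests encode topology into the id can override KVM_MAX_VCPU_ID in its asm/kvm_host.h, which is included just above. A hypothetical arch override might look like the following (the numbers are illustrative, not part of this diff):

        /* arch/foo/include/asm/kvm_host.h -- hypothetical example */
        #define KVM_MAX_VCPUS   255     /* how many vCPUs may be created      */
        #define KVM_MAX_VCPU_ID 65535   /* largest vcpu_id userspace may pass */
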
@@ -225,6 +229,7 @@ struct kvm_vcpu {
 	sigset_t sigset;
 	struct kvm_vcpu_stat stat;
 	unsigned int halt_poll_ns;
+	bool valid_wakeup;
 
 #ifdef CONFIG_HAS_IOMEM
 	int mmio_needed;
@@ -447,12 +452,13 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 
 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
 {
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu = NULL;
 	int i;
 
-	if (id < 0 || id >= KVM_MAX_VCPUS)
+	if (id < 0)
 		return NULL;
-	vcpu = kvm_get_vcpu(kvm, id);
+	if (id < KVM_MAX_VCPUS)
+		vcpu = kvm_get_vcpu(kvm, id);
 	if (vcpu && vcpu->vcpu_id == id)
 		return vcpu;
 	kvm_for_each_vcpu(i, vcpu, kvm)
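
The hard bound on id is relaxed to a heuristic: an id that fits the vcpus[] array is still tried as a direct index first (vcpu_id often equals creation order), but anything larger now falls through to the kvm_for_each_vcpu() scan instead of failing outright. Illustrative behavior, assuming a single vCPU created with a large id (possible once an architecture raises KVM_MAX_VCPU_ID):

        /*
         * With KVM_MAX_VCPUS == 4, id 3000 skips the (now guarded) fast
         * path, and the linear scan finds the vCPU; the old code
         * returned NULL without looking.
         */
        struct kvm_vcpu *vcpu = kvm_get_vcpu_by_id(kvm, 3000);
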
@@ -651,6 +657,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
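
kvm_vcpu_wake_up() exposes the wake-up half of kvm_vcpu_kick() on its own, for callers that only need to end a halt (wake the task sleeping in kvm_vcpu_block()) without forcing a guest exit. A rough sketch of the resulting split in virt/kvm/kvm_main.c; the in-tree kick also has an arch-specific should-kick check:

        void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
        {
                int me, cpu = vcpu->cpu;

                kvm_vcpu_wake_up(vcpu);         /* leave kvm_vcpu_block() */
                me = get_cpu();
                if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                        smp_send_reschedule(cpu);       /* IPI a running vCPU */
                put_cpu();
        }
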
@@ -1091,6 +1098,11 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
 
 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
 {
+	/*
+	 * Ensure the rest of the request is published to kvm_check_request's
+	 * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
+	 */
+	smp_wmb();
 	set_bit(req, &vcpu->requests);
 }
 
@@ -1098,6 +1110,12 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 {
 	if (test_bit(req, &vcpu->requests)) {
 		clear_bit(req, &vcpu->requests);
+
+		/*
+		 * Ensure the rest of the request is visible to kvm_check_request's
+		 * caller. Paired with the smp_wmb in kvm_make_request.
+		 */
+		smp_mb__after_atomic();
 		return true;
 	} else {
 		return false;
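
Together the two barriers make requests a publish/consume protocol: the requester writes any per-request data, smp_wmb() orders those stores before set_bit(), and smp_mb__after_atomic() keeps the consumer's data reads after the test-and-clear. A minimal usage sketch; KVM_REQ_EVENT is a real request, but the pending_event field and deliver() helper here are hypothetical:

        /* Requesting side: fill the payload, then publish the bit. */
        vcpu->arch.pending_event = event;       /* hypothetical payload field */
        kvm_make_request(KVM_REQ_EVENT, vcpu);  /* smp_wmb() + set_bit()      */

        /* vCPU side: once the bit is consumed, the payload is visible. */
        if (kvm_check_request(KVM_REQ_EVENT, vcpu))     /* clear + full barrier */
                deliver(vcpu, vcpu->arch.pending_event);        /* hypothetical */
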
@@ -1169,6 +1187,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+bool kvm_arch_has_irq_bypass(void);
 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
 			   struct irq_bypass_producer *);
 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
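
The new kvm_arch_has_irq_bypass() lets generic code ask whether the architecture actually implements bypass (e.g. posted interrupts) before wiring a consumer up to a producer. A sketch of the expected call site, assuming the irqfd consumer in virt/kvm/eventfd.c; the real registration also sets stop/start callbacks:

        /* Register the bypass consumer only when the arch supports it. */
        if (kvm_arch_has_irq_bypass()) {
                irqfd->consumer.token = (void *)irqfd->eventfd;
                irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
                irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
                irq_bypass_register_consumer(&irqfd->consumer);
        }
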
@@ -1179,4 +1198,18 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
 			      uint32_t guest_irq, bool set);
 #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
 
+#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
+/* If we wake up during the poll time, was it a successful poll? */
+static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
+{
+	return vcpu->valid_wakeup;
+}
+
+#else
+static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
+{
+	return true;
+}
+#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+
 #endif
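
vcpu_valid_wakeup() closes the loop with the new valid_wakeup field in struct kvm_vcpu: an architecture that selects CONFIG_HAVE_KVM_INVALID_WAKEUPS can mark a wakeup as one that halt polling should not take credit for, so spurious wakeups stop inflating the poll window. A sketch of how the grow/shrink tuning in kvm_vcpu_block() can consult it; the exact in-tree logic differs in detail:

        /* Sketch: an invalid wakeup shrinks the poll window, never grows it. */
        if (block_ns <= vcpu->halt_poll_ns)
                ;                               /* poll succeeded: keep window */
        else if (!vcpu_valid_wakeup(vcpu) || block_ns > halt_poll_ns)
                shrink_halt_poll_ns(vcpu);      /* spurious wakeup / long block */
        else if (vcpu->halt_poll_ns < halt_poll_ns)
                grow_halt_poll_ns(vcpu);        /* valid wakeup after short halt */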