diffstat:
 include/linux/kvm_host.h | 34 ++++++++++++++++++++++++++++++++++
 virt/kvm/kvm_main.c      |  5 +++++
 2 files changed, 39 insertions(+), 0 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index db9aa917840a..361b36fe7ecc 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -183,6 +183,18 @@ struct kvm_vcpu {
 	} async_pf;
 #endif
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+	/*
+	 * Cpu relax intercept or pause loop exit optimization
+	 * in_spin_loop: set when a vcpu does a pause loop exit
+	 *  or cpu relax intercepted.
+	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
+	 */
+	struct {
+		bool in_spin_loop;
+		bool dy_eligible;
+	} spin_loop;
+#endif
 	struct kvm_vcpu_arch arch;
 };
 
@@ -898,5 +910,27 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 	}
 }
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.in_spin_loop = val;
+}
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+	vcpu->spin_loop.dy_eligible = val;
+}
+
+#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 68dda513cd72..0892b75eeedd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -239,6 +239,9 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 	}
 	vcpu->run = page_address(page);
 
+	kvm_vcpu_set_in_spin_loop(vcpu, false);
+	kvm_vcpu_set_dy_eligible(vcpu, false);
+
 	r = kvm_arch_vcpu_init(vcpu);
 	if (r < 0)
 		goto fail_free_run;
@@ -1585,6 +1588,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 	int pass;
 	int i;
 
+	kvm_vcpu_set_in_spin_loop(me, true);
 	/*
 	 * We boost the priority of a VCPU that is runnable but not
 	 * currently running, because it got preempted by something
@@ -1610,6 +1614,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 		}
 	}
 }
+	kvm_vcpu_set_in_spin_loop(me, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
 
