author    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>    2011-01-12 02:40:31 -0500
committer Marcelo Tosatti <mtosatti@redhat.com>            2011-03-17 12:08:26 -0400
commit    6b7e2d0991489559a1df4500d77f7b76c4607ed0
tree      b82e941c3ca4d519c71577ad21807af4d02b0679  /arch/x86/kvm
parent    d48ead8b0b48862a87138d04efb7580a1a25beb5
KVM: Add "exiting guest mode" state
Currently we keep track of only two states: guest mode and host mode. This patch adds an "exiting guest mode" state that tells us an IPI will happen soon, so unless we need to wait for the IPI we can avoid it completely.

Also:
1. There is no need to read/write ->mode atomically from the vcpu's own thread.
2. Reorganize struct kvm_vcpu so that ->mode and ->requests explicitly share a cache line.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
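The enum behind the new vcpu->mode field is added to include/linux/kvm_host.h by this patch and so does not appear in the arch/x86/kvm diffstat below. Roughly, the three states look like this (a sketch; the comments are annotations, not part of the patch):

enum {
	OUTSIDE_GUEST_MODE,	/* vcpu thread is running host code */
	IN_GUEST_MODE,		/* vcpu is in, or about to enter, guest mode */
	EXITING_GUEST_MODE	/* a kick has been flagged; an exit is imminent */
};

The ordering requirement in the first hunk below pairs with make_all_cpus_request(): the vcpu thread must publish ->mode before it reads ->requests, while the requester sets ->requests before it reads ->mode, so at least one side is guaranteed to see the other's write.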
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/x86.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5eccdba08bd0..a7f65aa6eef6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5210,14 +5210,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	kvm_load_guest_fpu(vcpu);
 	kvm_load_guest_xcr0(vcpu);
 
-	atomic_set(&vcpu->guest_mode, 1);
-	smp_wmb();
+	vcpu->mode = IN_GUEST_MODE;
+
+	/* We should set ->mode before check ->requests,
+	 * see the comment in make_all_cpus_request.
+	 */
+	smp_mb();
 
 	local_irq_disable();
 
-	if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
+	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
 	    || need_resched() || signal_pending(current)) {
-		atomic_set(&vcpu->guest_mode, 0);
+		vcpu->mode = OUTSIDE_GUEST_MODE;
 		smp_wmb();
 		local_irq_enable();
 		preempt_enable();
@@ -5253,7 +5257,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
 
-	atomic_set(&vcpu->guest_mode, 0);
+	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 	local_irq_enable();
 
@@ -6157,7 +6161,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 
 	me = get_cpu();
 	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
-		if (atomic_xchg(&vcpu->guest_mode, 0))
+		if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
 			smp_send_reschedule(cpu);
 	put_cpu();
 }
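kvm_vcpu_exiting_guest_mode(), used by kvm_vcpu_kick() above, is likewise defined in include/linux/kvm_host.h rather than in this arch/x86/kvm diff. A minimal sketch of what it does, assuming the kvm_host.h half of the patch:

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/* Atomically flip IN_GUEST_MODE -> EXITING_GUEST_MODE and return the
	 * previous value; the caller only sends the reschedule IPI when the
	 * vcpu really was in guest mode, and a second kicker sees
	 * EXITING_GUEST_MODE and skips the redundant IPI.
	 */
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}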