Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--  drivers/kvm/kvm_main.c | 37
1 file changed, 13 insertions(+), 24 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index af2d288c881d..07ae280e8fe5 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -198,21 +198,15 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
 
 static void ack_flush(void *_completed)
 {
-	atomic_t *completed = _completed;
-
-	atomic_inc(completed);
 }
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	int i, cpu, needed;
+	int i, cpu;
 	cpumask_t cpus;
 	struct kvm_vcpu *vcpu;
-	atomic_t completed;
 
-	atomic_set(&completed, 0);
 	cpus_clear(cpus);
-	needed = 0;
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		vcpu = kvm->vcpus[i];
 		if (!vcpu)
@@ -221,23 +215,9 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 			continue;
 		cpu = vcpu->cpu;
 		if (cpu != -1 && cpu != raw_smp_processor_id())
-			if (!cpu_isset(cpu, cpus)) {
-				cpu_set(cpu, cpus);
-				++needed;
-			}
+			cpu_set(cpu, cpus);
 	}
-
-	/*
-	 * We really want smp_call_function_mask() here. But that's not
-	 * available, so ipi all cpus in parallel and wait for them
-	 * to complete.
-	 */
-	for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
-		smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
-	while (atomic_read(&completed) != needed) {
-		cpu_relax();
-		barrier();
-	}
+	smp_call_function_mask(cpus, ack_flush, NULL, 1);
 }
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
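
Taken together, these two hunks drop the open-coded IPI fan-out (one smp_call_function_single() per remote CPU, with ack_flush() bumping an atomic counter that the caller spun on) in favour of a single smp_call_function_mask() call. With wait set to 1 the call returns only after ack_flush() has run on every CPU in the mask, so the completion counter, the needed bookkeeping and the cpu_relax() spin loop become unnecessary, and ack_flush() shrinks to an empty stub. A sketch of how kvm_flush_remote_tlbs() reads with both hunks applied (the loop-body lines that fall between the two hunks are not part of this diff and are left elided here):

	static void ack_flush(void *_completed)
	{
	}

	void kvm_flush_remote_tlbs(struct kvm *kvm)
	{
		int i, cpu;
		cpumask_t cpus;
		struct kvm_vcpu *vcpu;

		cpus_clear(cpus);
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
			/* ... lines between the two hunks elided ... */
			cpu = vcpu->cpu;
			if (cpu != -1 && cpu != raw_smp_processor_id())
				cpu_set(cpu, cpus);
		}
		/* wait == 1: return only once ack_flush() has run on every CPU in cpus */
		smp_call_function_mask(cpus, ack_flush, NULL, 1);
	}
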
@@ -2054,12 +2034,21 @@ again:
 
 	kvm_x86_ops->run(vcpu, kvm_run);
 
-	kvm_guest_exit();
 	vcpu->guest_mode = 0;
 	local_irq_enable();
 
 	++vcpu->stat.exits;
 
+	/*
+	 * We must have an instruction between local_irq_enable() and
+	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
+	 * the interrupt shadow. The stat.exits increment will do nicely.
+	 * But we need to prevent reordering, hence this barrier():
+	 */
+	barrier();
+
+	kvm_guest_exit();
+
 	preempt_enable();
 
 	/*
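
The final hunk moves kvm_guest_exit() from before local_irq_enable() to after the stat.exits increment. As the new comment explains, there must be at least one instruction between local_irq_enable() and kvm_guest_exit() so the timer interrupt is not delayed by the interrupt shadow; the increment serves as that instruction, and barrier() keeps the compiler from reordering kvm_guest_exit() back across it. A sketch of the ordering the hunk establishes (the enclosing function is not named by the hunk, only its again: label is visible):

	kvm_x86_ops->run(vcpu, kvm_run);

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;	/* the instruction between local_irq_enable() and kvm_guest_exit() */

	barrier();		/* prevent the compiler from reordering across this point */

	kvm_guest_exit();

	preempt_enable();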