-rw-r--r--  drivers/kvm/kvm_main.c  26
1 file changed, 3 insertions, 23 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8c458f262872..07ae280e8fe5 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -198,21 +198,15 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
 
 static void ack_flush(void *_completed)
 {
-	atomic_t *completed = _completed;
-
-	atomic_inc(completed);
 }
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	int i, cpu, needed;
+	int i, cpu;
 	cpumask_t cpus;
 	struct kvm_vcpu *vcpu;
-	atomic_t completed;
 
-	atomic_set(&completed, 0);
 	cpus_clear(cpus);
-	needed = 0;
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		vcpu = kvm->vcpus[i];
 		if (!vcpu)
@@ -221,23 +215,9 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 			continue;
 		cpu = vcpu->cpu;
 		if (cpu != -1 && cpu != raw_smp_processor_id())
-			if (!cpu_isset(cpu, cpus)) {
-				cpu_set(cpu, cpus);
-				++needed;
-			}
-	}
-
-	/*
-	 * We really want smp_call_function_mask() here. But that's not
-	 * available, so ipi all cpus in parallel and wait for them
-	 * to complete.
-	 */
-	for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
-		smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
-	while (atomic_read(&completed) != needed) {
-		cpu_relax();
-		barrier();
+			cpu_set(cpu, cpus);
 	}
+	smp_call_function_mask(cpus, ack_flush, NULL, 1);
 }
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
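The heart of the change is easiest to see from the removed and added lines alone. The excerpts below are lifted directly from the hunks above; the comment in the "after" excerpt is editorial, everything else is verbatim.

Before (removed lines):

	/*
	 * We really want smp_call_function_mask() here. But that's not
	 * available, so ipi all cpus in parallel and wait for them
	 * to complete.
	 */
	for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
		smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
	while (atomic_read(&completed) != needed) {
		cpu_relax();
		barrier();
	}

After (added line):

	/*
	 * One call sends the IPI to every CPU in the mask and, because the
	 * final wait argument is 1, returns only after ack_flush() has run
	 * on each of those CPUs.
	 */
	smp_call_function_mask(cpus, ack_flush, NULL, 1);

Since smp_call_function_mask() now provides the completion guarantee, ack_flush() shrinks to an empty stub and the completed/needed bookkeeping disappears. The !cpu_isset() test around cpu_set() goes with it: it existed only to keep the needed count accurate, and setting an already-set bit in the mask is harmless once nothing is counting CPUs.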
