author     Laurent Vivier <Laurent.Vivier@bull.net>   2007-10-22 10:33:07 -0400
committer  Avi Kivity <avi@qumranet.com>              2007-10-22 11:21:54 -0400
commit     49d3bd7e2b990e717aa66e229410b8f5096c4956 (patch)
tree       6979af80c6f628f554cf70514a94f025af99f031 /drivers
parent     83d87d167367ae2cc2c6810399aefac33a2ced41 (diff)
KVM: Use new smp_call_function_mask() in kvm_flush_remote_tlbs()
In kvm_flush_remote_tlbs(), replace a loop using smp_call_function_single()
by a single call to smp_call_function_mask() (which is new for x86_64).

Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Avi Kivity <avi@qumranet.com>
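For reference, here is kvm_flush_remote_tlbs() as it reads after this patch, stitched together from the new ("+") side of the two hunks below. Treat it as a sketch rather than a verbatim copy of kvm_main.c: the comments are added here for explanation, and two unchanged context lines that fall between the hunks are not visible in the diff, so they are only flagged with a comment.

static void ack_flush(void *_completed)
{
        /* Intentionally empty: calling with wait=1 already makes the
         * sender block until every targeted CPU has run this handler,
         * so the old atomic_t acknowledgement counter is no longer
         * needed. */
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                /* (two unchanged lines, hidden between the diff's two
                 *  hunks, are omitted here) */
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        /* One IPI pass over all collected CPUs; wait=1 returns only
         * after ack_flush() has completed on each of them. */
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}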
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/kvm/kvm_main.c   26
1 file changed, 3 insertions, 23 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8c458f262872..07ae280e8fe5 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -198,21 +198,15 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
 
 static void ack_flush(void *_completed)
 {
-        atomic_t *completed = _completed;
-
-        atomic_inc(completed);
 }
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-        int i, cpu, needed;
+        int i, cpu;
         cpumask_t cpus;
         struct kvm_vcpu *vcpu;
-        atomic_t completed;
 
-        atomic_set(&completed, 0);
         cpus_clear(cpus);
-        needed = 0;
         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                 vcpu = kvm->vcpus[i];
                 if (!vcpu)
@@ -221,23 +215,9 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
                         continue;
                 cpu = vcpu->cpu;
                 if (cpu != -1 && cpu != raw_smp_processor_id())
-                        if (!cpu_isset(cpu, cpus)) {
-                                cpu_set(cpu, cpus);
-                                ++needed;
-                        }
-        }
-
-        /*
-         * We really want smp_call_function_mask() here. But that's not
-         * available, so ipi all cpus in parallel and wait for them
-         * to complete.
-         */
-        for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
-                smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
-        while (atomic_read(&completed) != needed) {
-                cpu_relax();
-                barrier();
+                        cpu_set(cpu, cpus);
         }
+        smp_call_function_mask(cpus, ack_flush, NULL, 1);
 }
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
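A note on the API being adopted: smp_call_function_mask() takes the target cpumask (by value, in this era), a callback, an opaque info pointer, and a wait flag; with wait non-zero it does not return until the callback has finished on every CPU in the mask, which is what lets the patch drop the hand-rolled completion counter. Below is a minimal, hypothetical usage sketch along the same lines; kick_remote_cpus() and noop_ipi() are invented names, and the exact prototype and calling constraints (e.g. not being callable with interrupts disabled) should be checked against the 2.6.23/2.6.24 x86-64 tree rather than taken from this sketch.

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Handler run on each targeted CPU. It can stay empty: the interrupt
 * itself is the useful side effect, and wait=1 in the caller provides
 * the synchronisation. */
static void noop_ipi(void *info)
{
}

/* Hypothetical helper: interrupt every CPU in 'mask' (the caller is
 * expected to have excluded itself, as the KVM code above does) and
 * return only once each of them has executed noop_ipi(). */
static void kick_remote_cpus(cpumask_t mask)
{
        smp_call_function_mask(mask, noop_ipi, NULL, 1);
}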