about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2009-06-09 08:56:29 -0400
committerAvi Kivity <avi@redhat.com>2009-09-10 01:32:52 -0400
commit988a2cae6a3c0dea6df59808a935a9a697bfc28c (patch)
treec1118d86c5d6f24fe738c608917b0affb311f26d /arch
parent73880c80aa9c8dc353cd0ad26579023213cd5314 (diff)
KVM: Use macro to iterate over vcpus.
[christian: remove unused variables on s390]

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- arch/ia64/kvm/kvm-ia64.c | 29
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 16
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 31
-rw-r--r-- arch/x86/kvm/i8254.c | 7
-rw-r--r-- arch/x86/kvm/mmu.c | 6
-rw-r--r-- arch/x86/kvm/x86.c | 25
6 files changed, 56 insertions(+), 58 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d1f7bcda2c7f..5c766bd82b05 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -337,13 +337,12 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
337{ 337{
338 union ia64_lid lid; 338 union ia64_lid lid;
339 int i; 339 int i;
340 struct kvm_vcpu *vcpu;
340 341
341 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) { 342 kvm_for_each_vcpu(i, vcpu, kvm) {
342 if (kvm->vcpus[i]) { 343 lid.val = VCPU_LID(vcpu);
343 lid.val = VCPU_LID(kvm->vcpus[i]); 344 if (lid.id == id && lid.eid == eid)
344 if (lid.id == id && lid.eid == eid) 345 return vcpu;
345 return kvm->vcpus[i];
346 }
347 } 346 }
348 347
349 return NULL; 348 return NULL;
@@ -409,21 +408,21 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
409 struct kvm *kvm = vcpu->kvm; 408 struct kvm *kvm = vcpu->kvm;
410 struct call_data call_data; 409 struct call_data call_data;
411 int i; 410 int i;
411 struct kvm_vcpu *vcpui;
412 412
413 call_data.ptc_g_data = p->u.ptc_g_data; 413 call_data.ptc_g_data = p->u.ptc_g_data;
414 414
415 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) { 415 kvm_for_each_vcpu(i, vcpui, kvm) {
416 if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == 416 if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
417 KVM_MP_STATE_UNINITIALIZED || 417 vcpu == vcpui)
418 vcpu == kvm->vcpus[i])
419 continue; 418 continue;
420 419
421 if (waitqueue_active(&kvm->vcpus[i]->wq)) 420 if (waitqueue_active(&vcpui->wq))
422 wake_up_interruptible(&kvm->vcpus[i]->wq); 421 wake_up_interruptible(&vcpui->wq);
423 422
424 if (kvm->vcpus[i]->cpu != -1) { 423 if (vcpui->cpu != -1) {
425 call_data.vcpu = kvm->vcpus[i]; 424 call_data.vcpu = vcpui;
426 smp_call_function_single(kvm->vcpus[i]->cpu, 425 smp_call_function_single(vcpui->cpu,
427 vcpu_global_purge, &call_data, 1); 426 vcpu_global_purge, &call_data, 1);
428 } else 427 } else
429 printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); 428 printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2cf915e51e7e..7ad30e0a1b9a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -122,13 +122,17 @@ struct kvm *kvm_arch_create_vm(void)
122static void kvmppc_free_vcpus(struct kvm *kvm) 122static void kvmppc_free_vcpus(struct kvm *kvm)
123{ 123{
124 unsigned int i; 124 unsigned int i;
125 struct kvm_vcpu *vcpu;
125 126
126 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 127 kvm_for_each_vcpu(i, vcpu, kvm)
127 if (kvm->vcpus[i]) { 128 kvm_arch_vcpu_free(vcpu);
128 kvm_arch_vcpu_free(kvm->vcpus[i]); 129
129 kvm->vcpus[i] = NULL; 130 mutex_lock(&kvm->lock);
130 } 131 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
131 } 132 kvm->vcpus[i] = NULL;
133
134 atomic_set(&kvm->online_vcpus, 0);
135 mutex_unlock(&kvm->lock);
132} 136}
133 137
134void kvm_arch_sync_events(struct kvm *kvm) 138void kvm_arch_sync_events(struct kvm *kvm)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 098bfa6fbdf6..07ced89740d7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -211,13 +211,17 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
211static void kvm_free_vcpus(struct kvm *kvm) 211static void kvm_free_vcpus(struct kvm *kvm)
212{ 212{
213 unsigned int i; 213 unsigned int i;
214 struct kvm_vcpu *vcpu;
214 215
215 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 216 kvm_for_each_vcpu(i, vcpu, kvm)
216 if (kvm->vcpus[i]) { 217 kvm_arch_vcpu_destroy(vcpu);
217 kvm_arch_vcpu_destroy(kvm->vcpus[i]); 218
218 kvm->vcpus[i] = NULL; 219 mutex_lock(&kvm->lock);
219 } 220 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
220 } 221 kvm->vcpus[i] = NULL;
222
223 atomic_set(&kvm->online_vcpus, 0);
224 mutex_unlock(&kvm->lock);
221} 225}
222 226
223void kvm_arch_sync_events(struct kvm *kvm) 227void kvm_arch_sync_events(struct kvm *kvm)
@@ -314,8 +318,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
314 BUG_ON(!kvm->arch.sca); 318 BUG_ON(!kvm->arch.sca);
315 if (!kvm->arch.sca->cpu[id].sda) 319 if (!kvm->arch.sca->cpu[id].sda)
316 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block; 320 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
317 else
318 BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
319 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32); 321 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
320 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; 322 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
321 323
@@ -683,6 +685,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
683 int user_alloc) 685 int user_alloc)
684{ 686{
685 int i; 687 int i;
688 struct kvm_vcpu *vcpu;
686 689
687 /* A few sanity checks. We can have exactly one memory slot which has 690 /* A few sanity checks. We can have exactly one memory slot which has
688 to start at guest virtual zero and which has to be located at a 691 to start at guest virtual zero and which has to be located at a
@@ -707,14 +710,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
707 return -EINVAL; 710 return -EINVAL;
708 711
709 /* request update of sie control block for all available vcpus */ 712 /* request update of sie control block for all available vcpus */
710 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 713 kvm_for_each_vcpu(i, vcpu, kvm) {
711 if (kvm->vcpus[i]) { 714 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
712 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, 715 continue;
713 &kvm->vcpus[i]->requests)) 716 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
714 continue;
715 kvm_s390_inject_sigp_stop(kvm->vcpus[i],
716 ACTION_RELOADVCPU_ON_STOP);
717 }
718 } 717 }
719 718
720 return 0; 719 return 0;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 06d8f84ae8a2..15fc95b2fc05 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -669,11 +669,8 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
669 * VCPU0, and only if its LVT0 is in EXTINT mode. 669 * VCPU0, and only if its LVT0 is in EXTINT mode.
670 */ 670 */
671 if (kvm->arch.vapics_in_nmi_mode > 0) 671 if (kvm->arch.vapics_in_nmi_mode > 0)
672 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 672 kvm_for_each_vcpu(i, vcpu, kvm)
673 vcpu = kvm->vcpus[i]; 673 kvm_apic_nmi_wd_deliver(vcpu);
674 if (vcpu)
675 kvm_apic_nmi_wd_deliver(vcpu);
676 }
677} 674}
678 675
679void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu) 676void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d443a421ca3e..5f97dbd24291 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1347,10 +1347,10 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1347static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm) 1347static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1348{ 1348{
1349 int i; 1349 int i;
1350 struct kvm_vcpu *vcpu;
1350 1351
1351 for (i = 0; i < KVM_MAX_VCPUS; ++i) 1352 kvm_for_each_vcpu(i, vcpu, kvm)
1352 if (kvm->vcpus[i]) 1353 vcpu->arch.last_pte_updated = NULL;
1353 kvm->vcpus[i]->arch.last_pte_updated = NULL;
1354} 1354}
1355 1355
1356static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) 1356static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d8adc1da76dd..89862a80e32c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2946,10 +2946,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
2946 2946
2947 spin_lock(&kvm_lock); 2947 spin_lock(&kvm_lock);
2948 list_for_each_entry(kvm, &vm_list, vm_list) { 2948 list_for_each_entry(kvm, &vm_list, vm_list) {
2949 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 2949 kvm_for_each_vcpu(i, vcpu, kvm) {
2950 vcpu = kvm->vcpus[i];
2951 if (!vcpu)
2952 continue;
2953 if (vcpu->cpu != freq->cpu) 2950 if (vcpu->cpu != freq->cpu)
2954 continue; 2951 continue;
2955 if (!kvm_request_guest_time_update(vcpu)) 2952 if (!kvm_request_guest_time_update(vcpu))
@@ -4678,20 +4675,22 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
4678static void kvm_free_vcpus(struct kvm *kvm) 4675static void kvm_free_vcpus(struct kvm *kvm)
4679{ 4676{
4680 unsigned int i; 4677 unsigned int i;
4678 struct kvm_vcpu *vcpu;
4681 4679
4682 /* 4680 /*
4683 * Unpin any mmu pages first. 4681 * Unpin any mmu pages first.
4684 */ 4682 */
4685 for (i = 0; i < KVM_MAX_VCPUS; ++i) 4683 kvm_for_each_vcpu(i, vcpu, kvm)
4686 if (kvm->vcpus[i]) 4684 kvm_unload_vcpu_mmu(vcpu);
4687 kvm_unload_vcpu_mmu(kvm->vcpus[i]); 4685 kvm_for_each_vcpu(i, vcpu, kvm)
4688 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 4686 kvm_arch_vcpu_free(vcpu);
4689 if (kvm->vcpus[i]) { 4687
4690 kvm_arch_vcpu_free(kvm->vcpus[i]); 4688 mutex_lock(&kvm->lock);
4691 kvm->vcpus[i] = NULL; 4689 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
4692 } 4690 kvm->vcpus[i] = NULL;
4693 }
4694 4691
4692 atomic_set(&kvm->online_vcpus, 0);
4693 mutex_unlock(&kvm->lock);
4695} 4694}
4696 4695
4697void kvm_arch_sync_events(struct kvm *kvm) 4696void kvm_arch_sync_events(struct kvm *kvm)