-rw-r--r--  arch/ia64/kvm/kvm-ia64.c     29
-rw-r--r--  arch/powerpc/kvm/powerpc.c   16
-rw-r--r--  arch/s390/kvm/kvm-s390.c     31
-rw-r--r--  arch/x86/kvm/i8254.c          7
-rw-r--r--  arch/x86/kvm/mmu.c            6
-rw-r--r--  arch/x86/kvm/x86.c           25
-rw-r--r--  include/linux/kvm_host.h     11
-rw-r--r--  virt/kvm/irq_comm.c           6
-rw-r--r--  virt/kvm/kvm_main.c          19
9 files changed, 76 insertions, 74 deletions
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d1f7bcda2c7f..5c766bd82b05 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -337,13 +337,12 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
 {
 	union ia64_lid lid;
 	int i;
+	struct kvm_vcpu *vcpu;
 
-	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
-		if (kvm->vcpus[i]) {
-			lid.val = VCPU_LID(kvm->vcpus[i]);
-			if (lid.id == id && lid.eid == eid)
-				return kvm->vcpus[i];
-		}
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		lid.val = VCPU_LID(vcpu);
+		if (lid.id == id && lid.eid == eid)
+			return vcpu;
 	}
 
 	return NULL;
@@ -409,21 +408,21 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	struct kvm *kvm = vcpu->kvm;
 	struct call_data call_data;
 	int i;
+	struct kvm_vcpu *vcpui;
 
 	call_data.ptc_g_data = p->u.ptc_g_data;
 
-	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
-		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
-				KVM_MP_STATE_UNINITIALIZED ||
-				vcpu == kvm->vcpus[i])
+	kvm_for_each_vcpu(i, vcpui, kvm) {
+		if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
+		    vcpu == vcpui)
 			continue;
 
-		if (waitqueue_active(&kvm->vcpus[i]->wq))
-			wake_up_interruptible(&kvm->vcpus[i]->wq);
+		if (waitqueue_active(&vcpui->wq))
+			wake_up_interruptible(&vcpui->wq);
 
-		if (kvm->vcpus[i]->cpu != -1) {
-			call_data.vcpu = kvm->vcpus[i];
-			smp_call_function_single(kvm->vcpus[i]->cpu,
+		if (vcpui->cpu != -1) {
+			call_data.vcpu = vcpui;
+			smp_call_function_single(vcpui->cpu,
 				vcpu_global_purge, &call_data, 1);
 		} else
 			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2cf915e51e7e..7ad30e0a1b9a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -122,13 +122,17 @@ struct kvm *kvm_arch_create_vm(void)
 static void kvmppc_free_vcpus(struct kvm *kvm)
 {
 	unsigned int i;
+	struct kvm_vcpu *vcpu;
 
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			kvm_arch_vcpu_free(kvm->vcpus[i]);
-			kvm->vcpus[i] = NULL;
-		}
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arch_vcpu_free(vcpu);
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+		kvm->vcpus[i] = NULL;
+
+	atomic_set(&kvm->online_vcpus, 0);
+	mutex_unlock(&kvm->lock);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
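
Note on the teardown pattern above (it recurs in the s390 and x86 hunks below): every vcpu is freed through the iterator first, and only afterwards are the vcpus[] slots cleared and online_vcpus reset under kvm->lock, because kvm_for_each_vcpu itself reads both the array and the counter. A minimal userspace sketch of the same two-phase teardown; mock_vm, mock_vcpu and the helpers are hypothetical stand-ins, with GCC atomics and pthreads replacing the kernel primitives:

	#include <pthread.h>
	#include <stdlib.h>

	#define MAX_VCPUS 16

	struct mock_vcpu { int id; };

	struct mock_vm {
		pthread_mutex_t lock;
		int online_vcpus;                 /* atomic_t in the kernel */
		struct mock_vcpu *vcpus[MAX_VCPUS];
	};

	/* Hypothetical analogue of kvm_arch_vcpu_free(). */
	static void mock_vcpu_free(struct mock_vcpu *v) { free(v); }

	static void mock_free_vcpus(struct mock_vm *vm)
	{
		int i, online = __atomic_load_n(&vm->online_vcpus, __ATOMIC_ACQUIRE);

		/* Phase 1: destroy every online vcpu while the array is
		 * still intact, mirroring the kvm_for_each_vcpu() pass. */
		for (i = 0; i < online && vm->vcpus[i]; i++)
			mock_vcpu_free(vm->vcpus[i]);

		/* Phase 2: only now invalidate what the iterator depends on. */
		pthread_mutex_lock(&vm->lock);
		for (i = 0; i < online; i++)
			vm->vcpus[i] = NULL;
		__atomic_store_n(&vm->online_vcpus, 0, __ATOMIC_RELEASE);
		pthread_mutex_unlock(&vm->lock);
	}
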
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 098bfa6fbdf6..07ced89740d7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -211,13 +211,17 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 static void kvm_free_vcpus(struct kvm *kvm)
 {
 	unsigned int i;
+	struct kvm_vcpu *vcpu;
 
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			kvm_arch_vcpu_destroy(kvm->vcpus[i]);
-			kvm->vcpus[i] = NULL;
-		}
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arch_vcpu_destroy(vcpu);
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+		kvm->vcpus[i] = NULL;
+
+	atomic_set(&kvm->online_vcpus, 0);
+	mutex_unlock(&kvm->lock);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
@@ -314,8 +318,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	BUG_ON(!kvm->arch.sca);
 	if (!kvm->arch.sca->cpu[id].sda)
 		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
-	else
-		BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
 	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
 	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 
@@ -683,6 +685,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				int user_alloc)
 {
 	int i;
+	struct kvm_vcpu *vcpu;
 
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
@@ -707,14 +710,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		return -EINVAL;
 
 	/* request update of sie control block for all available vcpus */
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			if (test_and_set_bit(KVM_REQ_MMU_RELOAD,
-						&kvm->vcpus[i]->requests))
-				continue;
-			kvm_s390_inject_sigp_stop(kvm->vcpus[i],
-						  ACTION_RELOADVCPU_ON_STOP);
-		}
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+			continue;
+		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
 	}
 
 	return 0;
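
The hunk above keeps the test_and_set_bit() guard: setting KVM_REQ_MMU_RELOAD is idempotent, so a vcpu whose request bit is already pending is not sent a second sigp stop. A hedged sketch of that request-bit pattern; the mock_* names are illustrative, not kernel API:

	#include <limits.h>

	#define MOCK_REQ_MMU_RELOAD 0

	/* Atomically set bit nr in *addr and return its previous value,
	 * the role test_and_set_bit() plays in the kernel. */
	static int mock_test_and_set_bit(int nr, unsigned long *addr)
	{
		unsigned long mask = 1UL << (nr % (sizeof(unsigned long) * CHAR_BIT));
		unsigned long old = __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST);

		return (old & mask) != 0;
	}

	/* Hypothetical stand-in for the sigp-stop injection path. */
	static void mock_request_reload(unsigned long *requests)
	{
		/* The bit is set at most once per pending reload, however
		 * many times this is called; later callers bail out here. */
		if (mock_test_and_set_bit(MOCK_REQ_MMU_RELOAD, requests))
			return;
		/* ...deliver the stop/reload signal to the vcpu here... */
	}
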
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 06d8f84ae8a2..15fc95b2fc05 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -669,11 +669,8 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
 	 * VCPU0, and only if its LVT0 is in EXTINT mode.
 	 */
 	if (kvm->arch.vapics_in_nmi_mode > 0)
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (vcpu)
-				kvm_apic_nmi_wd_deliver(vcpu);
-		}
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			kvm_apic_nmi_wd_deliver(vcpu);
 }
 
 void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d443a421ca3e..5f97dbd24291 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1347,10 +1347,10 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
 {
 	int i;
+	struct kvm_vcpu *vcpu;
 
-	for (i = 0; i < KVM_MAX_VCPUS; ++i)
-		if (kvm->vcpus[i])
-			kvm->vcpus[i]->arch.last_pte_updated = NULL;
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		vcpu->arch.last_pte_updated = NULL;
 }
 
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d8adc1da76dd..89862a80e32c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2946,10 +2946,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 
 	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (!vcpu)
-				continue;
+		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->cpu != freq->cpu)
 				continue;
 			if (!kvm_request_guest_time_update(vcpu))
@@ -4678,20 +4675,22 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 static void kvm_free_vcpus(struct kvm *kvm)
 {
 	unsigned int i;
+	struct kvm_vcpu *vcpu;
 
 	/*
 	 * Unpin any mmu pages first.
 	 */
-	for (i = 0; i < KVM_MAX_VCPUS; ++i)
-		if (kvm->vcpus[i])
-			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			kvm_arch_vcpu_free(kvm->vcpus[i]);
-			kvm->vcpus[i] = NULL;
-		}
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_unload_vcpu_mmu(vcpu);
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arch_vcpu_free(vcpu);
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+		kvm->vcpus[i] = NULL;
 
+	atomic_set(&kvm->online_vcpus, 0);
+	mutex_unlock(&kvm->lock);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d3fdf1a738c9..c6e4d02067fe 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -179,6 +179,17 @@ struct kvm {
 #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
 #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
 
+static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+{
+	smp_rmb();
+	return kvm->vcpus[i];
+}
+
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+	for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
+	     idx < atomic_read(&kvm->online_vcpus) && vcpup; \
+	     vcpup = kvm_get_vcpu(kvm, ++idx))
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
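
The new iterator is bounded by online_vcpus and also stops at the first NULL slot, which is why the converted call sites could drop their explicit NULL checks. The smp_rmb() in kvm_get_vcpu() presumably pairs with a write barrier on the creation side, so a reader never observes the counter incremented before the vcpu pointer is published. A self-contained userspace approximation of the macro and its use; the mock_* types are hypothetical, and GCC builtins stand in for the kernel's atomics and barriers:

	#include <stdio.h>

	#define MAX_VCPUS 16

	struct mock_vcpu { int vcpu_id; };

	struct mock_vm {
		int online_vcpus;                 /* atomic_t in the kernel */
		struct mock_vcpu *vcpus[MAX_VCPUS];
	};

	static struct mock_vcpu *mock_get_vcpu(struct mock_vm *vm, int i)
	{
		__atomic_thread_fence(__ATOMIC_ACQUIRE); /* plays the smp_rmb() role */
		return vm->vcpus[i];
	}

	#define mock_for_each_vcpu(idx, vcpup, vm) \
		for (idx = 0, vcpup = mock_get_vcpu(vm, idx); \
		     idx < __atomic_load_n(&(vm)->online_vcpus, __ATOMIC_RELAXED) && vcpup; \
		     vcpup = mock_get_vcpu(vm, ++idx))

	int main(void)
	{
		struct mock_vm vm = { 0 };
		struct mock_vcpu a = { .vcpu_id = 0 }, b = { .vcpu_id = 1 };
		struct mock_vcpu *v;
		int i;

		/* Creation side: publish the pointers before bumping the count. */
		vm.vcpus[0] = &a;
		vm.vcpus[1] = &b;
		__atomic_store_n(&vm.online_vcpus, 2, __ATOMIC_RELEASE);

		mock_for_each_vcpu(i, v, &vm)
			printf("vcpu %d online\n", v->vcpu_id);
		return 0;
	}
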
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 08a9a49481b2..bb8a1b5e41c1 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -68,10 +68,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 	    kvm_is_dm_lowest_prio(irq))
 		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
 
-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
-		vcpu = kvm->vcpus[i];
-
-		if (!vcpu || !kvm_apic_present(vcpu))
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (!kvm_apic_present(vcpu))
 			continue;
 
 		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 25e1f9c97b1a..777fe533cfe7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -738,10 +738,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 	me = get_cpu();
 	spin_lock(&kvm->requests_lock);
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		vcpu = kvm->vcpus[i];
-		if (!vcpu)
-			continue;
+	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (test_and_set_bit(req, &vcpu->requests))
 			continue;
 		cpu = vcpu->cpu;
@@ -1718,7 +1715,7 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
 	int r;
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu, *v;
 
 	vcpu = kvm_arch_vcpu_create(kvm, id);
 	if (IS_ERR(vcpu))
@@ -1736,8 +1733,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 		goto vcpu_destroy;
 	}
 
-	for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
-		if (kvm->vcpus[r]->vcpu_id == id) {
+	kvm_for_each_vcpu(r, v, kvm)
+		if (v->vcpu_id == id) {
 			r = -EEXIST;
 			goto vcpu_destroy;
 		}
@@ -2526,11 +2523,9 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	*val = 0;
 	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (vcpu)
-				*val += *(u32 *)((void *)vcpu + offset);
-		}
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			*val += *(u32 *)((void *)vcpu + offset);
+
 	spin_unlock(&kvm_lock);
 	return 0;
 }
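
The vcpu_stat_get() body rewritten in the last hunk accumulates one u32 counter across every vcpu of every VM by adding a debugfs-supplied byte offset to the vcpu pointer; each stat file carries the offsetof() of its field. A minimal sketch of that offset trick with a mock structure (not the kernel's):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct mock_vcpu_stat { uint32_t halt_exits; uint32_t irq_exits; };
	struct mock_vcpu { int id; struct mock_vcpu_stat stat; };

	/* Sum the u32 found `offset` bytes into each vcpu, the same
	 * pointer arithmetic vcpu_stat_get() performs. */
	static uint64_t sum_stat(struct mock_vcpu **vcpus, int n, size_t offset)
	{
		uint64_t val = 0;
		int i;

		for (i = 0; i < n; i++)
			val += *(uint32_t *)((char *)vcpus[i] + offset);
		return val;
	}

	int main(void)
	{
		struct mock_vcpu a = { 0, { 3, 1 } }, b = { 1, { 4, 2 } };
		struct mock_vcpu *vcpus[] = { &a, &b };

		printf("halt_exits total: %llu\n",
		       (unsigned long long)sum_stat(vcpus, 2,
				offsetof(struct mock_vcpu, stat.halt_exits)));
		return 0;
	}
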