-rw-r--r--	arch/ia64/kvm/kvm-ia64.c	15
-rw-r--r--	arch/s390/kvm/kvm-s390.h	10
-rw-r--r--	arch/x86/kvm/mmu.c	7
-rw-r--r--	arch/x86/kvm/vmx.c	6
-rw-r--r--	arch/x86/kvm/x86.c	43
-rw-r--r--	include/linux/kvm_host.h	2
6 files changed, 45 insertions, 38 deletions
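
Every hunk below makes the same conversion: readers of the memslot array stop taking kvm->slots_lock with down_read()/up_read() and instead enter an SRCU read-side critical section on kvm->srcu, dereferencing kvm->memslots inside it. A minimal illustrative sketch of that read-side pattern, not part of the patch (the function name some_memslot_reader is made up):

#include <linux/kvm_host.h>
#include <linux/srcu.h>

/* Illustrative sketch only: the converted read-side pattern. */
static void some_memslot_reader(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* was: down_read(&kvm->slots_lock); */
	slots = rcu_dereference(kvm->memslots);	/* snapshot of the memslot array */

	/* ... walk slots->memslots[] here; SRCU readers may sleep ... */

	srcu_read_unlock(&kvm->srcu, idx);	/* was: up_read(&kvm->slots_lock); */
}
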
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d0ad538f0083..d5e384641275 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -636,12 +636,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	union context *host_ctx, *guest_ctx;
-	int r;
+	int r, idx;
 
-	/*
-	 * down_read() may sleep and return with interrupts enabled
-	 */
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 again:
 	if (signal_pending(current)) {
@@ -663,7 +660,7 @@ again:
 	if (r < 0)
 		goto vcpu_run_fail;
 
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	kvm_guest_enter();
 
 	/*
@@ -687,7 +684,7 @@ again:
 	kvm_guest_exit();
 	preempt_enable();
 
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 	r = kvm_handle_exit(kvm_run, vcpu);
 
@@ -697,10 +694,10 @@ again:
 	}
 
 out:
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (r > 0) {
 		kvm_resched(vcpu);
-		down_read(&vcpu->kvm->slots_lock);
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		goto again;
 	}
 
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 06cce8285ba0..60f09ab3672c 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -67,10 +67,14 @@ static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
 
 static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
 {
+	int idx;
 	struct kvm_memory_slot *mem;
+	struct kvm_memslots *memslots;
 
-	down_read(&vcpu->kvm->slots_lock);
-	mem = &vcpu->kvm->memslots[0];
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+	memslots = rcu_dereference(vcpu->kvm->memslots);
+
+	mem = &memslots->memslots[0];
 
 	vcpu->arch.sie_block->gmsor = mem->userspace_addr;
 	vcpu->arch.sie_block->gmslm =
@@ -78,7 +82,7 @@ static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
 		(mem->npages << PAGE_SHIFT) +
 		VIRTIODESCSPACE - 1ul;
 
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 /* implemented in priv.c */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f8bf42a25995..25aabd00aa01 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2933,10 +2933,9 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 	spin_lock(&kvm_lock);
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		int npages;
+		int npages, idx;
 
-		if (!down_read_trylock(&kvm->slots_lock))
-			continue;
+		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
 		npages = kvm->arch.n_alloc_mmu_pages -
 			 kvm->arch.n_free_mmu_pages;
@@ -2949,7 +2948,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 		nr_to_scan--;
 
 		spin_unlock(&kvm->mmu_lock);
-		up_read(&kvm->slots_lock);
+		srcu_read_unlock(&kvm->srcu, idx);
 	}
 	if (kvm_freed)
 		list_move_tail(&kvm_freed->vm_list, &vm_list);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f1cae7d6113d..22ab7137d1d0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2478,10 +2478,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 msr;
-	int ret;
+	int ret, idx;
 
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	if (!init_rmode(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
 		goto out;
@@ -2589,7 +2589,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx->emulation_required = 0;
 
 out:
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	return ret;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9b42673df4af..53bc06a68105 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1306,15 +1306,15 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
 		    int (*do_msr)(struct kvm_vcpu *vcpu,
 				  unsigned index, u64 *data))
 {
-	int i;
+	int i, idx;
 
 	vcpu_load(vcpu);
 
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	for (i = 0; i < msrs->nmsrs; ++i)
 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
 			break;
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	vcpu_put(vcpu);
 
@@ -3900,14 +3900,15 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
 static void vapic_exit(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
+	int idx;
 
 	if (!apic || !apic->vapic_addr)
 		return;
 
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	kvm_release_page_dirty(apic->vapic_page);
 	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -4036,7 +4037,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		kvm_lapic_sync_to_vapic(vcpu);
 	}
 
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
 	kvm_guest_enter();
 
@@ -4078,7 +4079,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	preempt_enable();
 
-	down_read(&vcpu->kvm->slots_lock);
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 	/*
 	 * Profile KVM exit RIPs:
@@ -4100,6 +4101,7 @@ out:
 static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
+	struct kvm *kvm = vcpu->kvm;
 
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
 		pr_debug("vcpu %d received sipi with vector # %x\n",
@@ -4111,7 +4113,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	}
 
-	down_read(&vcpu->kvm->slots_lock);
+	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 	vapic_enter(vcpu);
 
 	r = 1;
@@ -4119,9 +4121,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
 			r = vcpu_enter_guest(vcpu);
 		else {
-			up_read(&vcpu->kvm->slots_lock);
+			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 			kvm_vcpu_block(vcpu);
-			down_read(&vcpu->kvm->slots_lock);
+			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 			if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
 			{
 				switch(vcpu->arch.mp_state) {
@@ -4156,13 +4158,13 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 			++vcpu->stat.signal_exits;
 		}
 		if (need_resched()) {
-			up_read(&vcpu->kvm->slots_lock);
+			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 			kvm_resched(vcpu);
-			down_read(&vcpu->kvm->slots_lock);
+			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 		}
 	}
 
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 	post_kvm_run_save(vcpu);
 
 	vapic_exit(vcpu);
@@ -4201,10 +4203,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_read_completed = 1;
 		vcpu->mmio_needed = 0;
 
-		down_read(&vcpu->kvm->slots_lock);
+		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
 					EMULTYPE_NO_DECODE);
-		up_read(&vcpu->kvm->slots_lock);
+		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 		if (r == EMULATE_DO_MMIO) {
 			/*
 			 * Read-modify-write. Back to userspace.
@@ -4967,11 +4969,12 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 {
 	unsigned long vaddr = tr->linear_address;
 	gpa_t gpa;
+	int idx;
 
 	vcpu_load(vcpu);
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	tr->physical_address = gpa;
 	tr->valid = gpa != UNMAPPED_GVA;
 	tr->writeable = 1;
@@ -5223,11 +5226,13 @@ fail:
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
+	int idx;
+
 	kfree(vcpu->arch.mce_banks);
 	kvm_free_lapic(vcpu);
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	kvm_mmu_destroy(vcpu);
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	free_page((unsigned long)vcpu->arch.pio_data);
 }
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5e9cb902550b..0bb9aa295e6c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -83,6 +83,8 @@ struct kvm_vcpu {
 	struct kvm_run *run;
 	unsigned long requests;
 	unsigned long guest_debug;
+	int srcu_idx;
+
 	int fpu_active;
 	int guest_fpu_loaded;
 	wait_queue_head_t wq;