author     Avi Kivity <avi@redhat.com>              2010-05-13 04:25:04 -0400
committer  Avi Kivity <avi@redhat.com>              2010-08-01 03:35:47 -0400
commit     2122ff5eab8faec853e43f6de886e8dc8f31e317 (patch)
tree       e2a31431281f255c6dd38a79ffba4f051fd5b20d /arch/x86/kvm/x86.c
parent     1683b2416e4c514d30ff5844a06733d0444ee000 (diff)
KVM: move vcpu locking to dispatcher for generic vcpu ioctls
All vcpu ioctls need to be locked, so instead of locking each one specifically
we lock at the generic dispatcher. This patch only updates generic ioctls and
leaves arch specific ioctls alone.

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c  40
1 file changed, 2 insertions(+), 38 deletions(-)
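
The other half of this commit, taking the lock once in the generic dispatcher,
lives in virt/kvm/kvm_main.c and is therefore not part of the diff below (this
page is limited to arch/x86/kvm/x86.c). As a rough, illustrative sketch only,
with the case list, special cases, and error paths abbreviated rather than the
verbatim dispatcher, the pattern looks like this:

/*
 * Illustrative sketch, not the verbatim kernel code: the generic vcpu
 * ioctl dispatcher in virt/kvm/kvm_main.c acquires the vcpu lock once,
 * so the per-ioctl handlers changed below no longer need their own
 * vcpu_load()/vcpu_put() pairs.
 */
static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        long r;

        vcpu_load(vcpu);                /* lock taken once, up front */
        switch (ioctl) {
        case KVM_GET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
                if (!r && copy_to_user((void __user *)arg, &mp_state,
                                       sizeof(mp_state)))
                        r = -EFAULT;
                break;
        }
        /* ... KVM_RUN, KVM_GET_REGS, KVM_SET_SREGS, etc. ... */
        default:
                /* remaining ioctls are handled by the arch code */
                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
        vcpu_put(vcpu);                 /* dropped on every exit path */
        return r;
}
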
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b05321adfd2f..5acd21245fc7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4773,8 +4773,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         int r;
         sigset_t sigsaved;

-        vcpu_load(vcpu);
-
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

@@ -4815,14 +4813,11 @@ out:
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);

-        vcpu_put(vcpu);
         return r;
 }

 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-        vcpu_load(vcpu);
-
         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
@@ -4845,15 +4840,11 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         regs->rip = kvm_rip_read(vcpu);
         regs->rflags = kvm_get_rflags(vcpu);

-        vcpu_put(vcpu);
-
         return 0;
 }

 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-        vcpu_load(vcpu);
-
         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
@@ -4878,8 +4869,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)

         vcpu->arch.exception.pending = false;

-        vcpu_put(vcpu);
-
         return 0;
 }

@@ -4898,8 +4887,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 {
         struct desc_ptr dt;

-        vcpu_load(vcpu);
-
         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
@@ -4931,26 +4918,20 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                 set_bit(vcpu->arch.interrupt.nr,
                         (unsigned long *)sregs->interrupt_bitmap);

-        vcpu_put(vcpu);
-
         return 0;
 }

 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
-        vcpu_load(vcpu);
         mp_state->mp_state = vcpu->arch.mp_state;
-        vcpu_put(vcpu);
         return 0;
 }

 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
-        vcpu_load(vcpu);
         vcpu->arch.mp_state = mp_state->mp_state;
-        vcpu_put(vcpu);
         return 0;
 }

@@ -4996,8 +4977,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         int pending_vec, max_bits;
         struct desc_ptr dt;

-        vcpu_load(vcpu);
-
         dt.size = sregs->idt.limit;
         dt.address = sregs->idt.base;
         kvm_x86_ops->set_idt(vcpu, &dt);
@@ -5057,8 +5036,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
             !is_protmode(vcpu))
                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

-        vcpu_put(vcpu);
-
         return 0;
 }

@@ -5068,12 +5045,10 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
         unsigned long rflags;
         int i, r;

-        vcpu_load(vcpu);
-
         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
                 r = -EBUSY;
                 if (vcpu->arch.exception.pending)
-                        goto unlock_out;
+                        goto out;
                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
                         kvm_queue_exception(vcpu, DB_VECTOR);
                 else
@@ -5115,8 +5090,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,

         r = 0;

-unlock_out:
-        vcpu_put(vcpu);
+out:

         return r;
 }
@@ -5152,7 +5126,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
         gpa_t gpa;
         int idx;

-        vcpu_load(vcpu);
         idx = srcu_read_lock(&vcpu->kvm->srcu);
         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
         srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -5160,7 +5133,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
         tr->valid = gpa != UNMAPPED_GVA;
         tr->writeable = 1;
         tr->usermode = 0;
-        vcpu_put(vcpu);

         return 0;
 }
@@ -5169,8 +5141,6 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

-        vcpu_load(vcpu);
-
         memcpy(fpu->fpr, fxsave->st_space, 128);
         fpu->fcw = fxsave->cwd;
         fpu->fsw = fxsave->swd;
@@ -5180,8 +5150,6 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
         fpu->last_dp = fxsave->rdp;
         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

-        vcpu_put(vcpu);
-
         return 0;
 }

@@ -5189,8 +5157,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

-        vcpu_load(vcpu);
-
         memcpy(fxsave->st_space, fpu->fpr, 128);
         fxsave->cwd = fpu->fcw;
         fxsave->swd = fpu->fsw;
@@ -5200,8 +5166,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
         fxsave->rdp = fpu->last_dp;
         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

-        vcpu_put(vcpu);
-
         return 0;
 }
